diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/filters/__init__.py b/detail_item/__init__.py
similarity index 100%
rename from env/Lib/site-packages/pip/_vendor/html5lib/filters/__init__.py
rename to detail_item/__init__.py
diff --git a/detail_item/admin.py b/detail_item/admin.py
new file mode 100644
index 0000000000000000000000000000000000000000..8c38f3f3dad51e4585f3984282c2a4bec5349c1e
--- /dev/null
+++ b/detail_item/admin.py
@@ -0,0 +1,3 @@
+from django.contrib import admin
+
+# Register your models here.
diff --git a/detail_item/apps.py b/detail_item/apps.py
new file mode 100644
index 0000000000000000000000000000000000000000..29c5f55bf2bbbf4b733972478146846323dbe9ba
--- /dev/null
+++ b/detail_item/apps.py
@@ -0,0 +1,6 @@
+from django.apps import AppConfig
+
+
+class DetailItemConfig(AppConfig):
+    default_auto_field = 'django.db.models.BigAutoField'
+    name = 'detail_item'
diff --git a/env/Lib/site-packages/pip-21.1.3.dist-info/REQUESTED b/detail_item/migrations/__init__.py
similarity index 100%
rename from env/Lib/site-packages/pip-21.1.3.dist-info/REQUESTED
rename to detail_item/migrations/__init__.py
diff --git a/detail_item/models.py b/detail_item/models.py
new file mode 100644
index 0000000000000000000000000000000000000000..f94ad44eb37686ad79ccebfdde1bf67f47748bb0
--- /dev/null
+++ b/detail_item/models.py
@@ -0,0 +1,8 @@
+from django.db import models
+
+# Create your models here.
+
+class Item(models.Model):
+    id = models.IntegerField(primary_key=True)
+    nama = models.CharField(max_length=100)
+    deskripsi = models.TextField()
\ No newline at end of file
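Note that an explicit `id` field has to be the primary key (otherwise Django's implicit auto `id` field clashes with it, system check fields.E004), and an explicit IntegerField primary key is never auto-incremented, so callers must supply `id` themselves. A minimal usage sketch, not part of the diff, assuming migrations for `detail_item` have been generated and applied (sample values invented):

    from detail_item.models import Item

    # id must be passed explicitly: there is no auto-increment on an
    # explicit IntegerField primary key.
    Item.objects.create(id=1, nama="Laptop", deskripsi="Office inventory laptop")
    item = Item.objects.get(id=1)
    assert item.nama == "Laptop"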
diff --git a/detail_item/templates/template.html b/detail_item/templates/template.html
new file mode 100644
index 0000000000000000000000000000000000000000..a2507bef9d2449cc6a56ea95425f3de9ea6d27e5
--- /dev/null
+++ b/detail_item/templates/template.html
@@ -0,0 +1,3 @@
+<div>
+{{ item }}
+</div>
\ No newline at end of file
diff --git a/detail_item/tests.py b/detail_item/tests.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ce503c2dd97ba78597f6ff6e4393132753573f6
--- /dev/null
+++ b/detail_item/tests.py
@@ -0,0 +1,3 @@
+from django.test import TestCase
+
+# Create your tests here.
diff --git a/detail_item/urls.py b/detail_item/urls.py
new file mode 100644
index 0000000000000000000000000000000000000000..dbb9a996c492b5f49e7ccd0f218f1355471d3657
--- /dev/null
+++ b/detail_item/urls.py
@@ -0,0 +1,8 @@
+from django.urls import path
+from detail_item.views import show_detail
+
+app_name = 'detail_item'
+
+urlpatterns = [
+    path('<int:id>/', show_detail, name='show_detail'),
+]
\ No newline at end of file
diff --git a/detail_item/views.py b/detail_item/views.py
new file mode 100644
index 0000000000000000000000000000000000000000..d76dfc8a6faba143c998e8cdce54d0432d01e17a
--- /dev/null
+++ b/detail_item/views.py
@@ -0,0 +1,10 @@
+from django.shortcuts import render, get_object_or_404
+from detail_item.models import Item
+
+# Create your views here.
+def show_detail(request, id):
+    item = get_object_or_404(Item, id=id)
+    context = {
+        'item': item
+    }
+    return render(request, "template.html", context)
\ No newline at end of file
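The URL pattern must capture `<int:id>`, since show_detail takes a required id argument, and get_object_or_404 turns a missing Item into a 404 instead of an unhandled DoesNotExist (a 500). None of this takes effect until the app is registered at the project level, which the diff does not show. A sketch of the assumed wiring (the project package name and the 'detail-item/' URL prefix are hypothetical); render(request, "template.html", ...) also relies on 'APP_DIRS': True in TEMPLATES, the startproject default, to find detail_item/templates/template.html:

    # <project>/settings.py  (hypothetical project package)
    INSTALLED_APPS = [
        # ... Django's default apps ...
        'detail_item',
    ]

    # <project>/urls.py
    from django.urls import include, path

    urlpatterns = [
        # Mounts detail_item/urls.py; its app_name provides the
        # 'detail_item' namespace, so the view reverses as
        # 'detail_item:show_detail'.
        path('detail-item/', include('detail_item.urls')),
    ]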
diff --git a/env/Lib/site-packages/pip-21.1.3.dist-info/INSTALLER b/env/Lib/site-packages/pip-21.1.3.dist-info/INSTALLER
deleted file mode 100644
index a1b589e38a32041e49332e5e81c2d363dc418d68..0000000000000000000000000000000000000000
--- a/env/Lib/site-packages/pip-21.1.3.dist-info/INSTALLER
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/env/Lib/site-packages/pip-21.1.3.dist-info/LICENSE.txt b/env/Lib/site-packages/pip-21.1.3.dist-info/LICENSE.txt
deleted file mode 100644
index 00addc2725a66c965dbefacd3692ecbbc52a80d8..0000000000000000000000000000000000000000
--- a/env/Lib/site-packages/pip-21.1.3.dist-info/LICENSE.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-Copyright (c) 2008-2021 The pip developers (see AUTHORS.txt file)
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/env/Lib/site-packages/pip-21.1.3.dist-info/METADATA b/env/Lib/site-packages/pip-21.1.3.dist-info/METADATA
deleted file mode 100644
index 7625ce6d841d0d6b5d90a8b9af7b7cb73f6b11a0..0000000000000000000000000000000000000000
--- a/env/Lib/site-packages/pip-21.1.3.dist-info/METADATA
+++ /dev/null
@@ -1,91 +0,0 @@
-Metadata-Version: 2.1
-Name: pip
-Version: 21.1.3
-Summary: The PyPA recommended tool for installing Python packages.
-Home-page: https://pip.pypa.io/
-Author: The pip developers
-Author-email: distutils-sig@python.org
-License: MIT
-Project-URL: Documentation, https://pip.pypa.io
-Project-URL: Source, https://github.com/pypa/pip
-Project-URL: Changelog, https://pip.pypa.io/en/stable/news/
-Platform: UNKNOWN
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Topic :: Software Development :: Build Tools
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3 :: Only
-Classifier: Programming Language :: Python :: 3.6
-Classifier: Programming Language :: Python :: 3.7
-Classifier: Programming Language :: Python :: 3.8
-Classifier: Programming Language :: Python :: 3.9
-Classifier: Programming Language :: Python :: Implementation :: CPython
-Classifier: Programming Language :: Python :: Implementation :: PyPy
-Requires-Python: >=3.6
-
-pip - The Python Package Installer
-==================================
-
-.. image:: https://img.shields.io/pypi/v/pip.svg
-   :target: https://pypi.org/project/pip/
-
-.. image:: https://readthedocs.org/projects/pip/badge/?version=latest
-   :target: https://pip.pypa.io/en/latest
-
-pip is the `package installer`_ for Python. You can use pip to install packages from the `Python Package Index`_ and other indexes.
-
-Please take a look at our documentation for how to install and use pip:
-
-* `Installation`_
-* `Usage`_
-
-We release updates regularly, with a new version every 3 months. Find more details in our documentation:
-
-* `Release notes`_
-* `Release process`_
-
-In pip 20.3, we've `made a big improvement to the heart of pip`_; `learn more`_. We want your input, so `sign up for our user experience research studies`_ to help us do it right.
-
-**Note**: pip 21.0, in January 2021, removed Python 2 support, per pip's `Python 2 support policy`_. Please migrate to Python 3.
-
-If you find bugs, need help, or want to talk to the developers, please use our mailing lists or chat rooms:
-
-* `Issue tracking`_
-* `Discourse channel`_
-* `User IRC`_
-
-If you want to get involved head over to GitHub to get the source code, look at our development documentation and feel free to jump on the developer mailing lists and chat rooms:
-
-* `GitHub page`_
-* `Development documentation`_
-* `Development mailing list`_
-* `Development IRC`_
-
-Code of Conduct
----------------
-
-Everyone interacting in the pip project's codebases, issue trackers, chat
-rooms, and mailing lists is expected to follow the `PSF Code of Conduct`_.
-
-.. _package installer: https://packaging.python.org/guides/tool-recommendations/
-.. _Python Package Index: https://pypi.org
-.. _Installation: https://pip.pypa.io/en/stable/installing.html
-.. _Usage: https://pip.pypa.io/en/stable/
-.. _Release notes: https://pip.pypa.io/en/stable/news.html
-.. _Release process: https://pip.pypa.io/en/latest/development/release-process/
-.. _GitHub page: https://github.com/pypa/pip
-.. _Development documentation: https://pip.pypa.io/en/latest/development
-.. _made a big improvement to the heart of pip: https://pyfound.blogspot.com/2020/11/pip-20-3-new-resolver.html
-.. _learn more: https://pip.pypa.io/en/latest/user_guide/#changes-to-the-pip-dependency-resolver-in-20-3-2020
-.. _sign up for our user experience research studies: https://pyfound.blogspot.com/2020/03/new-pip-resolver-to-roll-out-this-year.html
-.. _Python 2 support policy: https://pip.pypa.io/en/latest/development/release-process/#python-2-support
-.. _Issue tracking: https://github.com/pypa/pip/issues
-.. _Discourse channel: https://discuss.python.org/c/packaging
-.. _Development mailing list: https://mail.python.org/mailman3/lists/distutils-sig.python.org/
-.. _User IRC: https://webchat.freenode.net/?channels=%23pypa
-.. _Development IRC: https://webchat.freenode.net/?channels=%23pypa-dev
-.. _PSF Code of Conduct: https://github.com/pypa/.github/blob/main/CODE_OF_CONDUCT.md
-
-
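The env/ deletions above and below are pure cleanup of an accidentally committed virtualenv; the new view, however, is worth covering. A hypothetical test module in the style of the generated detail_item/tests.py, assuming the project wiring sketched earlier (the namespace comes from app_name in detail_item/urls.py):

    from django.test import TestCase
    from django.urls import reverse

    from detail_item.models import Item

    class ShowDetailTests(TestCase):
        def test_existing_item_is_rendered(self):
            Item.objects.create(id=1, nama="Laptop", deskripsi="Office laptop")
            response = self.client.get(
                reverse('detail_item:show_detail', kwargs={'id': 1}))
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response.context['item'].nama, 'Laptop')

        def test_missing_item_returns_404(self):
            # get_object_or_404 maps a missing row to a 404 response.
            response = self.client.get(
                reverse('detail_item:show_detail', kwargs={'id': 99}))
            self.assertEqual(response.status_code, 404)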
diff --git a/env/Lib/site-packages/pip-21.1.3.dist-info/RECORD b/env/Lib/site-packages/pip-21.1.3.dist-info/RECORD
deleted file mode 100644
index 128282abd52719a39347e2c310453769cd9306da..0000000000000000000000000000000000000000
--- a/env/Lib/site-packages/pip-21.1.3.dist-info/RECORD
+++ /dev/null
@@ -1,797 +0,0 @@
-pip/_vendor/urllib3/contrib/__pycache__/pyopenssl.cpython-39.pyc,, -pip/_vendor/urllib3/contrib/__pycache__/securetransport.cpython-39.pyc,, -pip/_vendor/urllib3/contrib/__pycache__/socks.cpython-39.pyc,, -pip/_vendor/urllib3/contrib/_appengine_environ.py,sha256=bDbyOEhW2CKLJcQqAKAyrEHN-aklsyHFKq6vF8ZFsmk,957 -pip/_vendor/urllib3/contrib/_securetransport/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_vendor/urllib3/contrib/_securetransport/__pycache__/__init__.cpython-39.pyc,, -pip/_vendor/urllib3/contrib/_securetransport/__pycache__/bindings.cpython-39.pyc,, -pip/_vendor/urllib3/contrib/_securetransport/__pycache__/low_level.cpython-39.pyc,, -pip/_vendor/urllib3/contrib/_securetransport/bindings.py,sha256=eRy1Mj-wpg7sR6-OSvnSV4jUbjMT464dLN_CWxbIRVw,17649 -pip/_vendor/urllib3/contrib/_securetransport/low_level.py,sha256=lgIdsSycqfB0Xm5BiJzXGeIKT7ybCQMFPJAgkcwPa1s,13908 -pip/_vendor/urllib3/contrib/appengine.py,sha256=lm86XjaOI7ajbonsN0JLA0ckkgSFWhgxWKLW_Ymt4sI,11034 -pip/_vendor/urllib3/contrib/ntlmpool.py,sha256=6I95h1_71fzxmoMSNtY0gB8lnyCoVtP_DpqFGj14fdU,4160 -pip/_vendor/urllib3/contrib/pyopenssl.py,sha256=kqm9SX4h_6h76QwGDBiNQ7i-ktKZunZuxzTVjjtHDto,16795 -pip/_vendor/urllib3/contrib/securetransport.py,sha256=MEEHa3YqG8ifDPYG0gO12C1tZu2I-HqGF4lC53cHFPg,34303 -pip/_vendor/urllib3/contrib/socks.py,sha256=DcRjM2l0rQMIyhYrN6r-tnVkY6ZTDxHJlM8_usAkGCA,7097 -pip/_vendor/urllib3/exceptions.py,sha256=0Mnno3KHTNfXRfY7638NufOPkUb6mXOm-Lqj-4x2w8A,8217 -pip/_vendor/urllib3/fields.py,sha256=kvLDCg_JmH1lLjUUEY_FLS8UhY7hBvDPuVETbY8mdrM,8579 -pip/_vendor/urllib3/filepost.py,sha256=5b_qqgRHVlL7uLtdAYBzBh-GHmU5AfJVt_2N0XS3PeY,2440 -pip/_vendor/urllib3/packages/__init__.py,sha256=h4BLhD4tLaBx1adaDtKXfupsgqY0wWLXb_f1_yVlV6A,108 -pip/_vendor/urllib3/packages/__pycache__/__init__.cpython-39.pyc,, -pip/_vendor/urllib3/packages/__pycache__/six.cpython-39.pyc,, -pip/_vendor/urllib3/packages/backports/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_vendor/urllib3/packages/backports/__pycache__/__init__.cpython-39.pyc,, -pip/_vendor/urllib3/packages/backports/__pycache__/makefile.cpython-39.pyc,, -pip/_vendor/urllib3/packages/backports/makefile.py,sha256=nbzt3i0agPVP07jqqgjhaYjMmuAi_W5E0EywZivVO8E,1417 -pip/_vendor/urllib3/packages/six.py,sha256=adx4z-eM_D0Vvu0IIqVzFACQ_ux9l64y7DkSEfbxCDs,32536 -pip/_vendor/urllib3/packages/ssl_match_hostname/__init__.py,sha256=zppezdEQdpGsYerI6mV6MfUYy495JV4mcOWC_GgbljU,757 -pip/_vendor/urllib3/packages/ssl_match_hostname/__pycache__/__init__.cpython-39.pyc,, -pip/_vendor/urllib3/packages/ssl_match_hostname/__pycache__/_implementation.cpython-39.pyc,, -pip/_vendor/urllib3/packages/ssl_match_hostname/_implementation.py,sha256=6dZ-q074g7XhsJ27MFCgkct8iVNZB3sMZvKhf-KUVy0,5679 -pip/_vendor/urllib3/poolmanager.py,sha256=whzlX6UTEgODMOCy0ZDMUONRBCz5wyIM8Z9opXAY-Lk,19763 -pip/_vendor/urllib3/request.py,sha256=ZFSIqX0C6WizixecChZ3_okyu7BEv0lZu1VT0s6h4SM,5985 -pip/_vendor/urllib3/response.py,sha256=hGhGBh7TkEkh_IQg5C1W_xuPNrgIKv5BUXPyE-q0LuE,28203 -pip/_vendor/urllib3/util/__init__.py,sha256=JEmSmmqqLyaw8P51gUImZh8Gwg9i1zSe-DoqAitn2nc,1155 -pip/_vendor/urllib3/util/__pycache__/__init__.cpython-39.pyc,, -pip/_vendor/urllib3/util/__pycache__/connection.cpython-39.pyc,, -pip/_vendor/urllib3/util/__pycache__/proxy.cpython-39.pyc,, -pip/_vendor/urllib3/util/__pycache__/queue.cpython-39.pyc,, -pip/_vendor/urllib3/util/__pycache__/request.cpython-39.pyc,, -pip/_vendor/urllib3/util/__pycache__/response.cpython-39.pyc,, 
-pip/_vendor/urllib3/util/__pycache__/retry.cpython-39.pyc,, -pip/_vendor/urllib3/util/__pycache__/ssl_.cpython-39.pyc,, -pip/_vendor/urllib3/util/__pycache__/ssltransport.cpython-39.pyc,, -pip/_vendor/urllib3/util/__pycache__/timeout.cpython-39.pyc,, -pip/_vendor/urllib3/util/__pycache__/url.cpython-39.pyc,, -pip/_vendor/urllib3/util/__pycache__/wait.cpython-39.pyc,, -pip/_vendor/urllib3/util/connection.py,sha256=_I-ZoF58xXLLjo-Q5IGaJrMxy2IW_exI8K9O9pq7op0,4922 -pip/_vendor/urllib3/util/proxy.py,sha256=FGipAEnvZteyldXNjce4DEB7YzwU-a5lep8y5S0qHQg,1604 -pip/_vendor/urllib3/util/queue.py,sha256=nRgX8_eX-_VkvxoX096QWoz8Ps0QHUAExILCY_7PncM,498 -pip/_vendor/urllib3/util/request.py,sha256=NnzaEKQ1Pauw5MFMV6HmgEMHITf0Aua9fQuzi2uZzGc,4123 -pip/_vendor/urllib3/util/response.py,sha256=GJpg3Egi9qaJXRwBh5wv-MNuRWan5BIu40oReoxWP28,3510 -pip/_vendor/urllib3/util/retry.py,sha256=s3ZNKXO6_t23ZQMg8zlu20PMSqraT495-S_mEY_19ak,21396 -pip/_vendor/urllib3/util/ssl_.py,sha256=dKcH-sqiR_ESWqKP1PJ6SUAUSvqC-fkMQGrTokV4NMY,16281 -pip/_vendor/urllib3/util/ssltransport.py,sha256=vOOCPRn-dODUZ2qtMCfStb0JmjgrgJaKLqJ9qvKucFs,6932 -pip/_vendor/urllib3/util/timeout.py,sha256=QSbBUNOB9yh6AnDn61SrLQ0hg5oz0I9-uXEG91AJuIg,10003 -pip/_vendor/urllib3/util/url.py,sha256=KP_yaHA0TFFAsQSImc_FOHO-Wq3PNHf_bKObKcrgdU4,13981 -pip/_vendor/urllib3/util/wait.py,sha256=3MUKRSAUJDB2tgco7qRUskW0zXGAWYvRRE4Q1_6xlLs,5404 -pip/_vendor/vendor.txt,sha256=yaN2qLLkKuoRmFLCxGJ1LZtZiuV7T7NoisZqwWNRhIU,364 -pip/_vendor/webencodings/__init__.py,sha256=qOBJIuPy_4ByYH6W_bNgJF-qYQ2DoU-dKsDu5yRWCXg,10579 -pip/_vendor/webencodings/__pycache__/__init__.cpython-39.pyc,, -pip/_vendor/webencodings/__pycache__/labels.cpython-39.pyc,, -pip/_vendor/webencodings/__pycache__/mklabels.cpython-39.pyc,, -pip/_vendor/webencodings/__pycache__/tests.cpython-39.pyc,, -pip/_vendor/webencodings/__pycache__/x_user_defined.cpython-39.pyc,, -pip/_vendor/webencodings/labels.py,sha256=4AO_KxTddqGtrL9ns7kAPjb0CcN6xsCIxbK37HY9r3E,8979 -pip/_vendor/webencodings/mklabels.py,sha256=GYIeywnpaLnP0GSic8LFWgd0UVvO_l1Nc6YoF-87R_4,1305 -pip/_vendor/webencodings/tests.py,sha256=OtGLyjhNY1fvkW1GvLJ_FV9ZoqC9Anyjr7q3kxTbzNs,6563 -pip/_vendor/webencodings/x_user_defined.py,sha256=yOqWSdmpytGfUgh_Z6JYgDNhoc-BAHyyeeT15Fr42tM,4307 -pip/py.typed,sha256=l9g-Fc1zgtIZ70tLJDcx6qKeqDutTVVSceIqUod-awg,286 diff --git a/env/Lib/site-packages/pip-21.1.3.dist-info/WHEEL b/env/Lib/site-packages/pip-21.1.3.dist-info/WHEEL deleted file mode 100644 index 385faab0525ccdbfd1070a8bebcca3ac8617236e..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip-21.1.3.dist-info/WHEEL +++ /dev/null @@ -1,5 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.36.2) -Root-Is-Purelib: true -Tag: py3-none-any - diff --git a/env/Lib/site-packages/pip-21.1.3.dist-info/entry_points.txt b/env/Lib/site-packages/pip-21.1.3.dist-info/entry_points.txt deleted file mode 100644 index d48bd8a85e683c7a9607f3f418f50d11445bdf40..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip-21.1.3.dist-info/entry_points.txt +++ /dev/null @@ -1,5 +0,0 @@ -[console_scripts] -pip = pip._internal.cli.main:main -pip3 = pip._internal.cli.main:main -pip3.8 = pip._internal.cli.main:main - diff --git a/env/Lib/site-packages/pip-21.1.3.dist-info/top_level.txt b/env/Lib/site-packages/pip-21.1.3.dist-info/top_level.txt deleted file mode 100644 index a1b589e38a32041e49332e5e81c2d363dc418d68..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip-21.1.3.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -pip 
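Side note on the lines removed above: they come from pip 21.1.3's RECORD file, which lists every installed file as path,sha256=&lt;digest&gt;,&lt;size&gt;; the digest is an unpadded URL-safe base64 SHA-256, and entries such as the *.pyc ones legitimately leave the hash and size fields empty. A minimal sketch of checking one entry (the helper name verify_record_entry is ours, not pip's; real RECORD files are CSV, so a path containing a comma would need csv-aware parsing):

import base64
import hashlib
from pathlib import Path


def verify_record_entry(site_packages: Path, line: str) -> bool:
    # e.g. "pip/_vendor/six.py,sha256=U4Z_yv534W5CNyjY9i8V1OXY2SjAny8y2L5vDLhhThM,34159"
    path, hash_field, size = line.rsplit(",", 2)
    if not hash_field:
        return True  # RECORD allows empty hash/size fields, e.g. for .pyc entries
    data = (site_packages / path).read_bytes()
    if size and len(data) != int(size):
        return False
    algo, _, expected = hash_field.partition("=")
    digest = hashlib.new(algo, data).digest()
    # Wheel RECORD hashes are URL-safe base64 with the trailing "=" padding stripped.
    return base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii") == expected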
diff --git a/env/Lib/site-packages/pip/__init__.py b/env/Lib/site-packages/pip/__init__.py index 75815d8bb6a0ca9f4d5d15f593147339cb9b27c7..6633ef7ed7c27ba8e228b254d1329bffec675ad6 100644 --- a/env/Lib/site-packages/pip/__init__.py +++ b/env/Lib/site-packages/pip/__init__.py @@ -1,10 +1,9 @@ from typing import List, Optional -__version__ = "21.1.3" +__version__ = "23.2.1" -def main(args=None): - # type: (Optional[List[str]]) -> int +def main(args: Optional[List[str]] = None) -> int: """This is an internal API only meant for use by pip's own console scripts. For additional details, see https://github.com/pypa/pip/issues/7498. diff --git a/env/Lib/site-packages/pip/__main__.py b/env/Lib/site-packages/pip/__main__.py index fe34a7b7772cef55f5b5cb3455a2850489620ca7..5991326115fe5026470165b387ba2bc78bceb006 100644 --- a/env/Lib/site-packages/pip/__main__.py +++ b/env/Lib/site-packages/pip/__main__.py @@ -1,6 +1,5 @@ import os import sys -import warnings # Remove '' and current working directory from the first entry # of sys.path, if present to avoid using current directory @@ -20,12 +19,6 @@ if __package__ == "": sys.path.insert(0, path) if __name__ == "__main__": - # Work around the error reported in #9540, pending a proper fix. - # Note: It is essential the warning filter is set *before* importing - # pip, as the deprecation happens at import time, not runtime. - warnings.filterwarnings( - "ignore", category=DeprecationWarning, module=".*packaging\\.version" - ) from pip._internal.cli.main import main as _main sys.exit(_main()) diff --git a/env/Lib/site-packages/pip/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index dc3d1815e1a9ac6e62163d03e139a6282a04e7a4..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/__pycache__/__main__.cpython-39.pyc b/env/Lib/site-packages/pip/__pycache__/__main__.cpython-39.pyc deleted file mode 100644 index bd7a243e21c993216c2db568cdbfd2a812a53f8a..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/__pycache__/__main__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/__init__.py b/env/Lib/site-packages/pip/_internal/__init__.py index 41071cd8608e42e3d8f97c38c218569dd296eab0..6afb5c627ce3db6e61cbf46276f7ddd42552eb28 100644 --- a/env/Lib/site-packages/pip/_internal/__init__.py +++ b/env/Lib/site-packages/pip/_internal/__init__.py @@ -1,10 +1,14 @@ from typing import List, Optional import pip._internal.utils.inject_securetransport # noqa +from pip._internal.utils import _log +# init_logging() must be called before any call to logging.getLogger() +# which happens at import of most modules. +_log.init_logging() -def main(args=None): - # type: (Optional[List[str]]) -> int + +def main(args: (Optional[List[str]]) = None) -> int: """This is preserved for old console scripts that may still be referencing it. 
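Beyond the __version__ bump from 21.1.3 to 23.2.1, the recurring mechanical change in these vendored-pip hunks is the migration from PEP 484 type comments, a holdover from when pip still supported Python 2, to inline annotations. Schematically (a sketch of the pattern, not a line quoted from any one hunk):

from typing import List, Optional

# Removed form: the signature is typed in a trailing comment.
# def main(args=None):
#     # type: (Optional[List[str]]) -> int

# Added form: the same type, expressed inline.
def main(args: Optional[List[str]] = None) -> int:
    return 0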
diff --git a/env/Lib/site-packages/pip/_internal/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 7fe14dae3c07d62767e554113e942c5938779003..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/__pycache__/build_env.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/__pycache__/build_env.cpython-39.pyc deleted file mode 100644 index 033a15b6b579e37bf15c82ae533c009683c074b1..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/__pycache__/build_env.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/__pycache__/cache.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/__pycache__/cache.cpython-39.pyc deleted file mode 100644 index c8bfcdf1b6e6d5cd821d106a50134e3e6c8983c9..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/__pycache__/cache.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/__pycache__/configuration.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/__pycache__/configuration.cpython-39.pyc deleted file mode 100644 index 5aff75cd21c04afcc029cd0a05d2dd4a94f157d0..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/__pycache__/configuration.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/__pycache__/exceptions.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/__pycache__/exceptions.cpython-39.pyc deleted file mode 100644 index e6a9ab952836dbe62cd94fcd7a1cf5e0175297b3..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/__pycache__/exceptions.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/__pycache__/main.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/__pycache__/main.cpython-39.pyc deleted file mode 100644 index 343a299dd20ad3517a6b1dad93f79a0ecf8cf061..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/__pycache__/main.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/__pycache__/pyproject.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/__pycache__/pyproject.cpython-39.pyc deleted file mode 100644 index d4627db45e6348451a0219f7131b6b479df88347..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/__pycache__/pyproject.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/__pycache__/self_outdated_check.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/__pycache__/self_outdated_check.cpython-39.pyc deleted file mode 100644 index 936888cbd4eb0274946e937dde5ccbe6f7016557..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/__pycache__/self_outdated_check.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/__pycache__/wheel_builder.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/__pycache__/wheel_builder.cpython-39.pyc deleted file mode 100644 index 9a6a77a02ea6035e17a88e86924b1df096153934..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/__pycache__/wheel_builder.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/build_env.py 
b/env/Lib/site-packages/pip/_internal/build_env.py index cc15250fcf7ad95fdb5d317a52a76c5d49c7fbc8..4f704a3547da02f913d6cfdbd4e0ed77c81caabe 100644 --- a/env/Lib/site-packages/pip/_internal/build_env.py +++ b/env/Lib/site-packages/pip/_internal/build_env.py @@ -1,24 +1,24 @@ """Build Environment used for isolation during sdist building """ -import contextlib import logging import os import pathlib +import site import sys import textwrap -import zipfile from collections import OrderedDict -from sysconfig import get_paths from types import TracebackType -from typing import TYPE_CHECKING, Iterable, Iterator, List, Optional, Set, Tuple, Type +from typing import TYPE_CHECKING, Iterable, List, Optional, Set, Tuple, Type, Union from pip._vendor.certifi import where -from pip._vendor.pkg_resources import Requirement, VersionConflict, WorkingSet +from pip._vendor.packaging.requirements import Requirement +from pip._vendor.packaging.version import Version from pip import __file__ as pip_location from pip._internal.cli.spinners import open_spinner -from pip._internal.locations import get_platlib, get_prefixed_libs, get_purelib +from pip._internal.locations import get_platlib, get_purelib, get_scheme +from pip._internal.metadata import get_default_environment, get_environment from pip._internal.utils.subprocess import call_subprocess from pip._internal.utils.temp_dir import TempDirectory, tempdir_kinds @@ -28,62 +28,68 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) -class _Prefix: +def _dedup(a: str, b: str) -> Union[Tuple[str], Tuple[str, str]]: + return (a, b) if a != b else (a,) + - def __init__(self, path): - # type: (str) -> None +class _Prefix: + def __init__(self, path: str) -> None: self.path = path self.setup = False - self.bin_dir = get_paths( - 'nt' if os.name == 'nt' else 'posix_prefix', - vars={'base': path, 'platbase': path} - )['scripts'] - self.lib_dirs = get_prefixed_libs(path) + scheme = get_scheme("", prefix=path) + self.bin_dir = scheme.scripts + self.lib_dirs = _dedup(scheme.purelib, scheme.platlib) -@contextlib.contextmanager -def _create_standalone_pip() -> Iterator[str]: - """Create a "standalone pip" zip file. +def get_runnable_pip() -> str: + """Get a file to pass to a Python executable, to run the currently-running pip. - The zip file's content is identical to the currently-running pip. - It will be used to install requirements into the build environment. + This is used to run a pip subprocess, for installing requirements into the build + environment. """ source = pathlib.Path(pip_location).resolve().parent - # Return the current instance if `source` is not a directory. We can't build - # a zip from this, and it likely means the instance is already standalone. if not source.is_dir(): - yield str(source) - return + # This would happen if someone is using pip from inside a zip file. In that + # case, we can use that directly. 
+ return str(source) - with TempDirectory(kind="standalone-pip") as tmp_dir: - pip_zip = os.path.join(tmp_dir.path, "__env_pip__.zip") - kwargs = {} - if sys.version_info >= (3, 8): - kwargs["strict_timestamps"] = False - with zipfile.ZipFile(pip_zip, "w", **kwargs) as zf: - for child in source.rglob("*"): - zf.write(child, child.relative_to(source.parent).as_posix()) - yield os.path.join(pip_zip, "pip") + return os.fsdecode(source / "__pip-runner__.py") -class BuildEnvironment: - """Creates and manages an isolated environment to install build deps +def _get_system_sitepackages() -> Set[str]: + """Get system site packages + + Usually from site.getsitepackages, + but fallback on `get_purelib()/get_platlib()` if unavailable + (e.g. in a virtualenv created by virtualenv<20) + + Returns normalized set of strings. """ + if hasattr(site, "getsitepackages"): + system_sites = site.getsitepackages() + else: + # virtualenv < 20 overwrites site.py without getsitepackages + # fallback on get_purelib/get_platlib. + # this is known to miss things, but shouldn't in the cases + # where getsitepackages() has been removed (inside a virtualenv) + system_sites = [get_purelib(), get_platlib()] + return {os.path.normcase(path) for path in system_sites} - def __init__(self): - # type: () -> None - temp_dir = TempDirectory( - kind=tempdir_kinds.BUILD_ENV, globally_managed=True - ) + +class BuildEnvironment: + """Creates and manages an isolated environment to install build deps""" + + def __init__(self) -> None: + temp_dir = TempDirectory(kind=tempdir_kinds.BUILD_ENV, globally_managed=True) self._prefixes = OrderedDict( (name, _Prefix(os.path.join(temp_dir.path, name))) - for name in ('normal', 'overlay') + for name in ("normal", "overlay") ) - self._bin_dirs = [] # type: List[str] - self._lib_dirs = [] # type: List[str] + self._bin_dirs: List[str] = [] + self._lib_dirs: List[str] = [] for prefix in reversed(list(self._prefixes.values())): self._bin_dirs.append(prefix.bin_dir) self._lib_dirs.extend(prefix.lib_dirs) @@ -91,15 +97,17 @@ class BuildEnvironment: # Customize site to: # - ensure .pth files are honored # - prevent access to system site packages - system_sites = { - os.path.normcase(site) for site in (get_purelib(), get_platlib()) - } - self._site_dir = os.path.join(temp_dir.path, 'site') + system_sites = _get_system_sitepackages() + + self._site_dir = os.path.join(temp_dir.path, "site") if not os.path.exists(self._site_dir): os.mkdir(self._site_dir) - with open(os.path.join(self._site_dir, 'sitecustomize.py'), 'w') as fp: - fp.write(textwrap.dedent( - ''' + with open( + os.path.join(self._site_dir, "sitecustomize.py"), "w", encoding="utf-8" + ) as fp: + fp.write( + textwrap.dedent( + """ import os, site, sys # First, drop system-sites related paths. 
@@ -122,90 +130,97 @@ class BuildEnvironment: for path in {lib_dirs!r}: assert not path in sys.path site.addsitedir(path) - ''' - ).format(system_sites=system_sites, lib_dirs=self._lib_dirs)) + """ + ).format(system_sites=system_sites, lib_dirs=self._lib_dirs) + ) - def __enter__(self): - # type: () -> None + def __enter__(self) -> None: self._save_env = { name: os.environ.get(name, None) - for name in ('PATH', 'PYTHONNOUSERSITE', 'PYTHONPATH') + for name in ("PATH", "PYTHONNOUSERSITE", "PYTHONPATH") } path = self._bin_dirs[:] - old_path = self._save_env['PATH'] + old_path = self._save_env["PATH"] if old_path: path.extend(old_path.split(os.pathsep)) pythonpath = [self._site_dir] - os.environ.update({ - 'PATH': os.pathsep.join(path), - 'PYTHONNOUSERSITE': '1', - 'PYTHONPATH': os.pathsep.join(pythonpath), - }) + os.environ.update( + { + "PATH": os.pathsep.join(path), + "PYTHONNOUSERSITE": "1", + "PYTHONPATH": os.pathsep.join(pythonpath), + } + ) def __exit__( self, - exc_type, # type: Optional[Type[BaseException]] - exc_val, # type: Optional[BaseException] - exc_tb # type: Optional[TracebackType] - ): - # type: (...) -> None + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: for varname, old_value in self._save_env.items(): if old_value is None: os.environ.pop(varname, None) else: os.environ[varname] = old_value - def check_requirements(self, reqs): - # type: (Iterable[str]) -> Tuple[Set[Tuple[str, str]], Set[str]] + def check_requirements( + self, reqs: Iterable[str] + ) -> Tuple[Set[Tuple[str, str]], Set[str]]: """Return 2 sets: - - conflicting requirements: set of (installed, wanted) reqs tuples - - missing requirements: set of reqs + - conflicting requirements: set of (installed, wanted) reqs tuples + - missing requirements: set of reqs """ missing = set() conflicting = set() if reqs: - ws = WorkingSet(self._lib_dirs) - for req in reqs: - try: - if ws.find(Requirement.parse(req)) is None: - missing.add(req) - except VersionConflict as e: - conflicting.add((str(e.args[0].as_requirement()), - str(e.args[1]))) + env = ( + get_environment(self._lib_dirs) + if hasattr(self, "_lib_dirs") + else get_default_environment() + ) + for req_str in reqs: + req = Requirement(req_str) + # We're explicitly evaluating with an empty extra value, since build + # environments are not provided any mechanism to select specific extras. + if req.marker is not None and not req.marker.evaluate({"extra": ""}): + continue + dist = env.get_distribution(req.name) + if not dist: + missing.add(req_str) + continue + if isinstance(dist.version, Version): + installed_req_str = f"{req.name}=={dist.version}" + else: + installed_req_str = f"{req.name}==={dist.version}" + if not req.specifier.contains(dist.version, prereleases=True): + conflicting.add((installed_req_str, req_str)) + # FIXME: Consider direct URL? return conflicting, missing def install_requirements( self, - finder, # type: PackageFinder - requirements, # type: Iterable[str] - prefix_as_string, # type: str - message # type: str - ): - # type: (...) -> None + finder: "PackageFinder", + requirements: Iterable[str], + prefix_as_string: str, + *, + kind: str, + ) -> None: prefix = self._prefixes[prefix_as_string] assert not prefix.setup prefix.setup = True if not requirements: return - with contextlib.ExitStack() as ctx: - # TODO: Remove this block when dropping 3.6 support. 
Python 3.6 - # lacks importlib.resources and pep517 has issues loading files in - # a zip, so we fallback to the "old" method by adding the current - # pip directory to the child process's sys.path. - if sys.version_info < (3, 7): - pip_runnable = os.path.dirname(pip_location) - else: - pip_runnable = ctx.enter_context(_create_standalone_pip()) - self._install_requirements( - pip_runnable, - finder, - requirements, - prefix, - message, - ) + self._install_requirements( + get_runnable_pip(), + finder, + requirements, + prefix, + kind=kind, + ) @staticmethod def _install_requirements( @@ -213,74 +228,84 @@ class BuildEnvironment: finder: "PackageFinder", requirements: Iterable[str], prefix: _Prefix, - message: str, + *, + kind: str, ) -> None: - args = [ - sys.executable, pip_runnable, 'install', - '--ignore-installed', '--no-user', '--prefix', prefix.path, - '--no-warn-script-location', - ] # type: List[str] + args: List[str] = [ + sys.executable, + pip_runnable, + "install", + "--ignore-installed", + "--no-user", + "--prefix", + prefix.path, + "--no-warn-script-location", + ] if logger.getEffectiveLevel() <= logging.DEBUG: - args.append('-v') - for format_control in ('no_binary', 'only_binary'): + args.append("-v") + for format_control in ("no_binary", "only_binary"): formats = getattr(finder.format_control, format_control) - args.extend(('--' + format_control.replace('_', '-'), - ','.join(sorted(formats or {':none:'})))) + args.extend( + ( + "--" + format_control.replace("_", "-"), + ",".join(sorted(formats or {":none:"})), + ) + ) index_urls = finder.index_urls if index_urls: - args.extend(['-i', index_urls[0]]) + args.extend(["-i", index_urls[0]]) for extra_index in index_urls[1:]: - args.extend(['--extra-index-url', extra_index]) + args.extend(["--extra-index-url", extra_index]) else: - args.append('--no-index') + args.append("--no-index") for link in finder.find_links: - args.extend(['--find-links', link]) + args.extend(["--find-links", link]) for host in finder.trusted_hosts: - args.extend(['--trusted-host', host]) + args.extend(["--trusted-host", host]) if finder.allow_all_prereleases: - args.append('--pre') + args.append("--pre") if finder.prefer_binary: - args.append('--prefer-binary') - args.append('--') + args.append("--prefer-binary") + args.append("--") args.extend(requirements) extra_environ = {"_PIP_STANDALONE_CERT": where()} - with open_spinner(message) as spinner: - call_subprocess(args, spinner=spinner, extra_environ=extra_environ) + with open_spinner(f"Installing {kind}") as spinner: + call_subprocess( + args, + command_desc=f"pip subprocess to install {kind}", + spinner=spinner, + extra_environ=extra_environ, + ) class NoOpBuildEnvironment(BuildEnvironment): - """A no-op drop-in replacement for BuildEnvironment - """ + """A no-op drop-in replacement for BuildEnvironment""" - def __init__(self): - # type: () -> None + def __init__(self) -> None: pass - def __enter__(self): - # type: () -> None + def __enter__(self) -> None: pass def __exit__( self, - exc_type, # type: Optional[Type[BaseException]] - exc_val, # type: Optional[BaseException] - exc_tb # type: Optional[TracebackType] - ): - # type: (...) 
-> None + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: pass - def cleanup(self): - # type: () -> None + def cleanup(self) -> None: pass def install_requirements( self, - finder, # type: PackageFinder - requirements, # type: Iterable[str] - prefix_as_string, # type: str - message # type: str - ): - # type: (...) -> None + finder: "PackageFinder", + requirements: Iterable[str], + prefix_as_string: str, + *, + kind: str, + ) -> None: raise NotImplementedError() diff --git a/env/Lib/site-packages/pip/_internal/cache.py b/env/Lib/site-packages/pip/_internal/cache.py index 7ef51b92e1d9cbbcf0aa3dc46b892c08fd7a6c55..8d3a664c7d1be57579608a7c1e1da4570b439a19 100644 --- a/env/Lib/site-packages/pip/_internal/cache.py +++ b/env/Lib/site-packages/pip/_internal/cache.py @@ -5,13 +5,14 @@ import hashlib import json import logging import os -from typing import Any, Dict, List, Optional, Set +from pathlib import Path +from typing import Any, Dict, List, Optional from pip._vendor.packaging.tags import Tag, interpreter_name, interpreter_version from pip._vendor.packaging.utils import canonicalize_name from pip._internal.exceptions import InvalidWheelFilename -from pip._internal.models.format_control import FormatControl +from pip._internal.models.direct_url import DirectUrl from pip._internal.models.link import Link from pip._internal.models.wheel import Wheel from pip._internal.utils.temp_dir import TempDirectory, tempdir_kinds @@ -19,9 +20,10 @@ from pip._internal.utils.urls import path_to_url logger = logging.getLogger(__name__) +ORIGIN_JSON_NAME = "origin.json" -def _hash_dict(d): - # type: (Dict[str, str]) -> str + +def _hash_dict(d: Dict[str, str]) -> str: """Return a stable sha224 of a dictionary.""" s = json.dumps(d, sort_keys=True, separators=(",", ":"), ensure_ascii=True) return hashlib.sha224(s.encode("ascii")).hexdigest() @@ -30,29 +32,16 @@ def _hash_dict(d): class Cache: """An abstract class - provides cache directories for data from links - - :param cache_dir: The root of the cache. - :param format_control: An object of FormatControl class to limit - binaries being read from the cache. - :param allowed_formats: which formats of files the cache should store. - ('binary' and 'source' are the only allowed values) + :param cache_dir: The root of the cache. 
""" - def __init__(self, cache_dir, format_control, allowed_formats): - # type: (str, FormatControl, Set[str]) -> None + def __init__(self, cache_dir: str) -> None: super().__init__() assert not cache_dir or os.path.isabs(cache_dir) self.cache_dir = cache_dir or None - self.format_control = format_control - self.allowed_formats = allowed_formats - - _valid_formats = {"source", "binary"} - assert self.allowed_formats.union(_valid_formats) == _valid_formats - def _get_cache_path_parts(self, link): - # type: (Link) -> List[str] - """Get parts of part that must be os.path.joined with cache_dir - """ + def _get_cache_path_parts(self, link: Link) -> List[str]: + """Get parts of part that must be os.path.joined with cache_dir""" # We want to generate an url to use as our cache key, we don't want to # just re-use the URL because it might have other items in the fragment @@ -84,22 +73,11 @@ class Cache: return parts - def _get_candidates(self, link, canonical_package_name): - # type: (Link, str) -> List[Any] - can_not_cache = ( - not self.cache_dir or - not canonical_package_name or - not link - ) + def _get_candidates(self, link: Link, canonical_package_name: str) -> List[Any]: + can_not_cache = not self.cache_dir or not canonical_package_name or not link if can_not_cache: return [] - formats = self.format_control.get_allowed_formats( - canonical_package_name - ) - if not self.allowed_formats.intersection(formats): - return [] - candidates = [] path = self.get_path_for_link(link) if os.path.isdir(path): @@ -107,19 +85,16 @@ class Cache: candidates.append((candidate, path)) return candidates - def get_path_for_link(self, link): - # type: (Link) -> str - """Return a directory to store cached items in for link. - """ + def get_path_for_link(self, link: Link) -> str: + """Return a directory to store cached items in for link.""" raise NotImplementedError() def get( self, - link, # type: Link - package_name, # type: Optional[str] - supported_tags, # type: List[Tag] - ): - # type: (...) -> Link + link: Link, + package_name: Optional[str], + supported_tags: List[Tag], + ) -> Link: """Returns a link to a cached item if it exists, otherwise returns the passed link. """ @@ -127,15 +102,12 @@ class Cache: class SimpleWheelCache(Cache): - """A cache of wheels for future installs. - """ + """A cache of wheels for future installs.""" - def __init__(self, cache_dir, format_control): - # type: (str, FormatControl) -> None - super().__init__(cache_dir, format_control, {"binary"}) + def __init__(self, cache_dir: str) -> None: + super().__init__(cache_dir) - def get_path_for_link(self, link): - # type: (Link) -> str + def get_path_for_link(self, link: Link) -> str: """Return a directory to store cached wheels for link Because there are M wheels for any one sdist, we provide a directory @@ -157,20 +129,17 @@ class SimpleWheelCache(Cache): def get( self, - link, # type: Link - package_name, # type: Optional[str] - supported_tags, # type: List[Tag] - ): - # type: (...) 
-> Link + link: Link, + package_name: Optional[str], + supported_tags: List[Tag], + ) -> Link: candidates = [] if not package_name: return link canonical_package_name = canonicalize_name(package_name) - for wheel_name, wheel_dir in self._get_candidates( - link, canonical_package_name - ): + for wheel_name, wheel_dir in self._get_candidates(link, canonical_package_name): try: wheel = Wheel(wheel_name) except InvalidWheelFilename: @@ -179,7 +148,9 @@ class SimpleWheelCache(Cache): logger.debug( "Ignoring cached wheel %s for %s as it " "does not match the expected distribution name %s.", - wheel_name, link, package_name, + wheel_name, + link, + package_name, ) continue if not wheel.supported(supported_tags): @@ -201,27 +172,39 @@ class SimpleWheelCache(Cache): class EphemWheelCache(SimpleWheelCache): - """A SimpleWheelCache that creates it's own temporary cache directory - """ + """A SimpleWheelCache that creates it's own temporary cache directory""" - def __init__(self, format_control): - # type: (FormatControl) -> None + def __init__(self) -> None: self._temp_dir = TempDirectory( kind=tempdir_kinds.EPHEM_WHEEL_CACHE, globally_managed=True, ) - super().__init__(self._temp_dir.path, format_control) + super().__init__(self._temp_dir.path) class CacheEntry: def __init__( self, - link, # type: Link - persistent, # type: bool + link: Link, + persistent: bool, ): self.link = link self.persistent = persistent + self.origin: Optional[DirectUrl] = None + origin_direct_url_path = Path(self.link.file_path).parent / ORIGIN_JSON_NAME + if origin_direct_url_path.exists(): + try: + self.origin = DirectUrl.from_json( + origin_direct_url_path.read_text(encoding="utf-8") + ) + except Exception as e: + logger.warning( + "Ignoring invalid cache entry origin file %s for %s (%s)", + origin_direct_url_path, + link.filename, + e, + ) class WheelCache(Cache): @@ -231,27 +214,23 @@ class WheelCache(Cache): when a certain link is not found in the simple wheel cache first. """ - def __init__(self, cache_dir, format_control): - # type: (str, FormatControl) -> None - super().__init__(cache_dir, format_control, {'binary'}) - self._wheel_cache = SimpleWheelCache(cache_dir, format_control) - self._ephem_cache = EphemWheelCache(format_control) + def __init__(self, cache_dir: str) -> None: + super().__init__(cache_dir) + self._wheel_cache = SimpleWheelCache(cache_dir) + self._ephem_cache = EphemWheelCache() - def get_path_for_link(self, link): - # type: (Link) -> str + def get_path_for_link(self, link: Link) -> str: return self._wheel_cache.get_path_for_link(link) - def get_ephem_path_for_link(self, link): - # type: (Link) -> str + def get_ephem_path_for_link(self, link: Link) -> str: return self._ephem_cache.get_path_for_link(link) def get( self, - link, # type: Link - package_name, # type: Optional[str] - supported_tags, # type: List[Tag] - ): - # type: (...) -> Link + link: Link, + package_name: Optional[str], + supported_tags: List[Tag], + ) -> Link: cache_entry = self.get_cache_entry(link, package_name, supported_tags) if cache_entry is None: return link @@ -259,11 +238,10 @@ class WheelCache(Cache): def get_cache_entry( self, - link, # type: Link - package_name, # type: Optional[str] - supported_tags, # type: List[Tag] - ): - # type: (...) -> Optional[CacheEntry] + link: Link, + package_name: Optional[str], + supported_tags: List[Tag], + ) -> Optional[CacheEntry]: """Returns a CacheEntry with a link to a cached item if it exists or None. 
The cache entry indicates if the item was found in the persistent or ephemeral cache. @@ -285,3 +263,30 @@ class WheelCache(Cache): return CacheEntry(retval, persistent=False) return None + + @staticmethod + def record_download_origin(cache_dir: str, download_info: DirectUrl) -> None: + origin_path = Path(cache_dir) / ORIGIN_JSON_NAME + if origin_path.exists(): + try: + origin = DirectUrl.from_json(origin_path.read_text(encoding="utf-8")) + except Exception as e: + logger.warning( + "Could not read origin file %s in cache entry (%s). " + "Will attempt to overwrite it.", + origin_path, + e, + ) + else: + # TODO: use DirectUrl.equivalent when + # https://github.com/pypa/pip/pull/10564 is merged. + if origin.url != download_info.url: + logger.warning( + "Origin URL %s in cache entry %s does not match download URL " + "%s. This is likely a pip bug or a cache corruption issue. " + "Will overwrite it with the new value.", + origin.url, + cache_dir, + download_info.url, + ) + origin_path.write_text(download_info.to_json(), encoding="utf-8") diff --git a/env/Lib/site-packages/pip/_internal/cli/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/cli/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index ff82adfae63eee653bc79382e24e4c238b6a410f..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/cli/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/cli/__pycache__/autocompletion.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/cli/__pycache__/autocompletion.cpython-39.pyc deleted file mode 100644 index 5bd91fd9253b4308b87b47d6255bec2e41e9d916..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/cli/__pycache__/autocompletion.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/cli/__pycache__/base_command.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/cli/__pycache__/base_command.cpython-39.pyc deleted file mode 100644 index b3f214f29cbc78efc695aa019cc97ce97a990923..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/cli/__pycache__/base_command.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/cli/__pycache__/cmdoptions.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/cli/__pycache__/cmdoptions.cpython-39.pyc deleted file mode 100644 index 12076cc0043e7bf61c200f413071848f1f2d3d10..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/cli/__pycache__/cmdoptions.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/cli/__pycache__/command_context.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/cli/__pycache__/command_context.cpython-39.pyc deleted file mode 100644 index c6722b70ee195e4921c901fcde9d50fd6b8d0848..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/cli/__pycache__/command_context.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/cli/__pycache__/main.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/cli/__pycache__/main.cpython-39.pyc deleted file mode 100644 index 2a1ea13d959aefde891da5e34e74833dcebedcdb..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/cli/__pycache__/main.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/cli/__pycache__/main_parser.cpython-39.pyc 
b/env/Lib/site-packages/pip/_internal/cli/__pycache__/main_parser.cpython-39.pyc deleted file mode 100644 index 87424e9188b059ff2b0dd1d8a7285af6a210861d..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/cli/__pycache__/main_parser.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/cli/__pycache__/parser.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/cli/__pycache__/parser.cpython-39.pyc deleted file mode 100644 index b3ce798f74719f2627fecd76d78d5c0fff11d182..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/cli/__pycache__/parser.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/cli/__pycache__/progress_bars.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/cli/__pycache__/progress_bars.cpython-39.pyc deleted file mode 100644 index 876b8062f01fc20a4763f35b0baedb985488f221..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/cli/__pycache__/progress_bars.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/cli/__pycache__/req_command.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/cli/__pycache__/req_command.cpython-39.pyc deleted file mode 100644 index 98a9806baf04bcc2b17af91728798359eb791a1b..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/cli/__pycache__/req_command.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/cli/__pycache__/spinners.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/cli/__pycache__/spinners.cpython-39.pyc deleted file mode 100644 index c1157315338cbb7d296cd9a545cf70586998bfa0..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/cli/__pycache__/spinners.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/cli/__pycache__/status_codes.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/cli/__pycache__/status_codes.cpython-39.pyc deleted file mode 100644 index 603750ba7833106e3d2b6e9315c72062ecacba23..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/cli/__pycache__/status_codes.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/cli/autocompletion.py b/env/Lib/site-packages/pip/_internal/cli/autocompletion.py index 3b1d2ac9b113218e6163c92b6209b17c27cc3eb1..226fe84dc0d0c4eb78f9b3c603df20cef0fdfda4 100644 --- a/env/Lib/site-packages/pip/_internal/cli/autocompletion.py +++ b/env/Lib/site-packages/pip/_internal/cli/autocompletion.py @@ -9,11 +9,10 @@ from typing import Any, Iterable, List, Optional from pip._internal.cli.main_parser import create_main_parser from pip._internal.commands import commands_dict, create_command -from pip._internal.utils.misc import get_installed_distributions +from pip._internal.metadata import get_default_environment -def autocomplete(): - # type: () -> None +def autocomplete() -> None: """Entry Point for completion of main and subcommand options.""" # Don't complete if user hasn't sourced bash_completion file. 
if "PIP_AUTO_COMPLETE" not in os.environ: @@ -30,7 +29,7 @@ def autocomplete(): options = [] # subcommand - subcommand_name = None # type: Optional[str] + subcommand_name: Optional[str] = None for word in cwords: if word in subcommands: subcommand_name = word @@ -46,11 +45,13 @@ def autocomplete(): "uninstall", ] if should_list_installed: + env = get_default_environment() lc = current.lower() installed = [ - dist.key - for dist in get_installed_distributions(local_only=True) - if dist.key.startswith(lc) and dist.key not in cwords[1:] + dist.canonical_name + for dist in env.iter_installed_distributions(local_only=True) + if dist.canonical_name.startswith(lc) + and dist.canonical_name not in cwords[1:] ] # if there are no dists installed, fall back to option completion if installed: @@ -58,6 +59,14 @@ def autocomplete(): print(dist) sys.exit(1) + should_list_installables = ( + not current.startswith("-") and subcommand_name == "install" + ) + if should_list_installables: + for path in auto_complete_paths(current, "path"): + print(path) + sys.exit(1) + subcommand = create_command(subcommand_name) for opt in subcommand.parser.option_list_all: @@ -107,8 +116,9 @@ def autocomplete(): sys.exit(1) -def get_path_completion_type(cwords, cword, opts): - # type: (List[str], int, Iterable[Any]) -> Optional[str] +def get_path_completion_type( + cwords: List[str], cword: int, opts: Iterable[Any] +) -> Optional[str]: """Get the type of path completion (``file``, ``dir``, ``path`` or None) :param cwords: same as the environmental variable ``COMP_WORDS`` @@ -130,14 +140,13 @@ def get_path_completion_type(cwords, cword, opts): return None -def auto_complete_paths(current, completion_type): - # type: (str, str) -> Iterable[str] +def auto_complete_paths(current: str, completion_type: str) -> Iterable[str]: """If ``completion_type`` is ``file`` or ``path``, list all regular files and directories starting with ``current``; otherwise only list directories starting with ``current``. 
:param current: The word to be completed - :param completion_type: path completion type(`file`, `path` or `dir`)i + :param completion_type: path completion type(``file``, ``path`` or ``dir``) :return: A generator of regular files and/or directories """ directory, filename = os.path.split(current) diff --git a/env/Lib/site-packages/pip/_internal/cli/base_command.py b/env/Lib/site-packages/pip/_internal/cli/base_command.py index b59420dda226367cec48c920fd1872cf3e15c310..6a3b8e6c213a9a8069b38729ab3a0c16a213ce62 100644 --- a/env/Lib/site-packages/pip/_internal/cli/base_command.py +++ b/env/Lib/site-packages/pip/_internal/cli/base_command.py @@ -1,5 +1,6 @@ """Base Command class, and related routines""" +import functools import logging import logging.config import optparse @@ -7,7 +8,9 @@ import os import sys import traceback from optparse import Values -from typing import Any, List, Optional, Tuple +from typing import Any, Callable, List, Optional, Tuple + +from pip._vendor.rich import traceback as rich_traceback from pip._internal.cli import cmdoptions from pip._internal.cli.command_context import CommandContextMixIn @@ -21,12 +24,12 @@ from pip._internal.cli.status_codes import ( from pip._internal.exceptions import ( BadCommand, CommandError, + DiagnosticPipError, InstallationError, NetworkConnectionError, PreviousBuildDirError, UninstallationError, ) -from pip._internal.utils.deprecation import deprecated from pip._internal.utils.filesystem import check_path_owner from pip._internal.utils.logging import BrokenStdoutLoggingError, setup_logging from pip._internal.utils.misc import get_prog, normalize_path @@ -40,11 +43,10 @@ logger = logging.getLogger(__name__) class Command(CommandContextMixIn): - usage = None # type: str - ignore_require_venv = False # type: bool + usage: str = "" + ignore_require_venv: bool = False - def __init__(self, name, summary, isolated=False): - # type: (str, str, bool) -> None + def __init__(self, name: str, summary: str, isolated: bool = False) -> None: super().__init__() self.name = name @@ -59,7 +61,7 @@ class Command(CommandContextMixIn): isolated=isolated, ) - self.tempdir_registry = None # type: Optional[TempDirRegistry] + self.tempdir_registry: Optional[TempDirRegistry] = None # Commands should add options to this option group optgroup_name = f"{self.name.capitalize()} Options" @@ -74,12 +76,10 @@ class Command(CommandContextMixIn): self.add_options() - def add_options(self): - # type: () -> None + def add_options(self) -> None: pass - def handle_pip_version_check(self, options): - # type: (Values) -> None + def handle_pip_version_check(self, options: Values) -> None: """ This is a no-op so that commands by default do not do the pip version check. @@ -88,25 +88,21 @@ class Command(CommandContextMixIn): # are present. 
assert not hasattr(options, "no_index") - def run(self, options, args): - # type: (Values, List[Any]) -> int + def run(self, options: Values, args: List[str]) -> int: raise NotImplementedError - def parse_args(self, args): - # type: (List[str]) -> Tuple[Any, Any] + def parse_args(self, args: List[str]) -> Tuple[Values, List[str]]: # factored out for testability return self.parser.parse_args(args) - def main(self, args): - # type: (List[str]) -> int + def main(self, args: List[str]) -> int: try: with self.main_context(): return self._main(args) finally: logging.shutdown() - def _main(self, args): - # type: (List[str]) -> int + def _main(self, args: List[str]) -> int: # We must initialize this before the tempdir manager, otherwise the # configuration would not be accessible by the time we clean up the # tempdir manager. @@ -126,6 +122,26 @@ class Command(CommandContextMixIn): user_log_file=options.log, ) + always_enabled_features = set(options.features_enabled) & set( + cmdoptions.ALWAYS_ENABLED_FEATURES + ) + if always_enabled_features: + logger.warning( + "The following features are always enabled: %s. ", + ", ".join(sorted(always_enabled_features)), + ) + + # Make sure that the --python argument isn't specified after the + # subcommand. We can tell, because if --python was specified, + # we should only reach this point if we're running in the created + # subprocess, which has the _PIP_RUNNING_IN_SUBPROCESS environment + # variable set. + if options.python and "_PIP_RUNNING_IN_SUBPROCESS" not in os.environ: + logger.critical( + "The --python option must be placed before the pip subcommand name" + ) + sys.exit(ERROR) + # TODO: Try to get these passing down from the command? # without resorting to os.environ to hold these. # This also affects isolated builds and it should. @@ -155,67 +171,66 @@ class Command(CommandContextMixIn): ) options.cache_dir = None - if getattr(options, "build_dir", None): - deprecated( - reason=( - "The -b/--build/--build-dir/--build-directory " - "option is deprecated and has no effect anymore." - ), - replacement=( - "use the TMPDIR/TEMP/TMP environment variable, " - "possibly combined with --no-clean" - ), - gone_in="21.3", - issue=8333, - ) - - if "2020-resolver" in options.features_enabled: - logger.warning( - "--use-feature=2020-resolver no longer has any effect, " - "since it is now the default dependency resolver in pip. " - "This will become an error in pip 21.0." - ) + def intercepts_unhandled_exc( + run_func: Callable[..., int] + ) -> Callable[..., int]: + @functools.wraps(run_func) + def exc_logging_wrapper(*args: Any) -> int: + try: + status = run_func(*args) + assert isinstance(status, int) + return status + except DiagnosticPipError as exc: + logger.error("[present-rich] %s", exc) + logger.debug("Exception information:", exc_info=True) + + return ERROR + except PreviousBuildDirError as exc: + logger.critical(str(exc)) + logger.debug("Exception information:", exc_info=True) + + return PREVIOUS_BUILD_DIR_ERROR + except ( + InstallationError, + UninstallationError, + BadCommand, + NetworkConnectionError, + ) as exc: + logger.critical(str(exc)) + logger.debug("Exception information:", exc_info=True) + + return ERROR + except CommandError as exc: + logger.critical("%s", exc) + logger.debug("Exception information:", exc_info=True) + + return ERROR + except BrokenStdoutLoggingError: + # Bypass our logger and write any remaining messages to + # stderr because stdout no longer works. 
+ print("ERROR: Pipe to stdout was broken", file=sys.stderr) + if level_number <= logging.DEBUG: + traceback.print_exc(file=sys.stderr) + + return ERROR + except KeyboardInterrupt: + logger.critical("Operation cancelled by user") + logger.debug("Exception information:", exc_info=True) + + return ERROR + except BaseException: + logger.critical("Exception:", exc_info=True) + + return UNKNOWN_ERROR + + return exc_logging_wrapper try: - status = self.run(options, args) - assert isinstance(status, int) - return status - except PreviousBuildDirError as exc: - logger.critical(str(exc)) - logger.debug("Exception information:", exc_info=True) - - return PREVIOUS_BUILD_DIR_ERROR - except ( - InstallationError, - UninstallationError, - BadCommand, - NetworkConnectionError, - ) as exc: - logger.critical(str(exc)) - logger.debug("Exception information:", exc_info=True) - - return ERROR - except CommandError as exc: - logger.critical("%s", exc) - logger.debug("Exception information:", exc_info=True) - - return ERROR - except BrokenStdoutLoggingError: - # Bypass our logger and write any remaining messages to stderr - # because stdout no longer works. - print("ERROR: Pipe to stdout was broken", file=sys.stderr) - if level_number <= logging.DEBUG: - traceback.print_exc(file=sys.stderr) - - return ERROR - except KeyboardInterrupt: - logger.critical("Operation cancelled by user") - logger.debug("Exception information:", exc_info=True) - - return ERROR - except BaseException: - logger.critical("Exception:", exc_info=True) - - return UNKNOWN_ERROR + if not options.debug_mode: + run = intercepts_unhandled_exc(self.run) + else: + run = self.run + rich_traceback.install(show_locals=True) + return run(options, args) finally: self.handle_pip_version_check(options) diff --git a/env/Lib/site-packages/pip/_internal/cli/cmdoptions.py b/env/Lib/site-packages/pip/_internal/cli/cmdoptions.py index f71c0b020116ae97bd345db15ac8c2ee8e6c6d43..02ba60827933d6623cdf6b1417762fee47c1ab6f 100644 --- a/env/Lib/site-packages/pip/_internal/cli/cmdoptions.py +++ b/env/Lib/site-packages/pip/_internal/cli/cmdoptions.py @@ -10,9 +10,10 @@ pass on state. To be consistent, all options will follow this design. # The following comment should be removed at some point in the future. # mypy: strict-optional=False +import importlib.util +import logging import os import textwrap -import warnings from functools import partial from optparse import SUPPRESS_HELP, Option, OptionGroup, OptionParser, Values from textwrap import dedent @@ -21,7 +22,6 @@ from typing import Any, Callable, Dict, Optional, Tuple from pip._vendor.packaging.utils import canonicalize_name from pip._internal.cli.parser import ConfigOptionParser -from pip._internal.cli.progress_bars import BAR_TYPES from pip._internal.exceptions import CommandError from pip._internal.locations import USER_CACHE_DIR, get_src_prefix from pip._internal.models.format_control import FormatControl @@ -30,9 +30,10 @@ from pip._internal.models.target_python import TargetPython from pip._internal.utils.hashes import STRONG_HASHES from pip._internal.utils.misc import strtobool +logger = logging.getLogger(__name__) -def raise_option_error(parser, option, msg): - # type: (OptionParser, Option, str) -> None + +def raise_option_error(parser: OptionParser, option: Option, msg: str) -> None: """ Raise an option parsing error using parser.error(). 
@@ -46,8 +47,7 @@ def raise_option_error(parser, option, msg): parser.error(msg) -def make_option_group(group, parser): - # type: (Dict[str, Any], ConfigOptionParser) -> OptionGroup +def make_option_group(group: Dict[str, Any], parser: ConfigOptionParser) -> OptionGroup: """ Return an OptionGroup object group -- assumed to be dict with 'name' and 'options' keys @@ -59,34 +59,7 @@ def make_option_group(group, parser): return option_group -def check_install_build_global(options, check_options=None): - # type: (Values, Optional[Values]) -> None - """Disable wheels if per-setup.py call options are set. - - :param options: The OptionParser options to update. - :param check_options: The options to check, if not supplied defaults to - options. - """ - if check_options is None: - check_options = options - - def getname(n): - # type: (str) -> Optional[Any] - return getattr(check_options, n, None) - - names = ["build_options", "global_options", "install_options"] - if any(map(getname, names)): - control = options.format_control - control.disallow_binaries() - warnings.warn( - "Disabling all use of wheels due to the use of --build-option " - "/ --global-option / --install-option.", - stacklevel=2, - ) - - -def check_dist_restriction(options, check_target=False): - # type: (Values, bool) -> None +def check_dist_restriction(options: Values, check_target: bool = False) -> None: """Function for determining if custom platform options are allowed. :param options: The OptionParser options. @@ -126,13 +99,11 @@ def check_dist_restriction(options, check_target=False): ) -def _path_option_check(option, opt, value): - # type: (Option, str, str) -> str +def _path_option_check(option: Option, opt: str, value: str) -> str: return os.path.expanduser(value) -def _package_name_option_check(option, opt, value): - # type: (Option, str, str) -> str +def _package_name_option_check(option: Option, opt: str, value: str) -> str: return canonicalize_name(value) @@ -147,16 +118,28 @@ class PipOption(Option): # options # ########### -help_ = partial( +help_: Callable[..., Option] = partial( Option, "-h", "--help", dest="help", action="help", help="Show help.", -) # type: Callable[..., Option] +) + +debug_mode: Callable[..., Option] = partial( + Option, + "--debug", + dest="debug_mode", + action="store_true", + default=False, + help=( + "Let unhandled exceptions propagate outside the main subroutine, " + "instead of logging them to stderr." + ), +) -isolated_mode = partial( +isolated_mode: Callable[..., Option] = partial( Option, "--isolated", dest="isolated_mode", @@ -166,20 +149,37 @@ isolated_mode = partial( "Run pip in an isolated mode, ignoring environment variables and user " "configuration." ), -) # type: Callable[..., Option] +) -require_virtualenv = partial( +require_virtualenv: Callable[..., Option] = partial( Option, - # Run only if inside a virtualenv, bail if not. "--require-virtualenv", "--require-venv", dest="require_venv", action="store_true", default=False, - help=SUPPRESS_HELP, -) # type: Callable[..., Option] + help=( + "Allow pip to only run in a virtual environment; " + "exit with an error otherwise." 
+ ), +) + +override_externally_managed: Callable[..., Option] = partial( + Option, + "--break-system-packages", + dest="override_externally_managed", + action="store_true", + help="Allow pip to modify an EXTERNALLY-MANAGED Python installation", +) -verbose = partial( +python: Callable[..., Option] = partial( + Option, + "--python", + dest="python", + help="Run pip with the specified Python interpreter.", +) + +verbose: Callable[..., Option] = partial( Option, "-v", "--verbose", @@ -187,27 +187,27 @@ verbose = partial( action="count", default=0, help="Give more output. Option is additive, and can be used up to 3 times.", -) # type: Callable[..., Option] +) -no_color = partial( +no_color: Callable[..., Option] = partial( Option, "--no-color", dest="no_color", action="store_true", default=False, help="Suppress colored output.", -) # type: Callable[..., Option] +) -version = partial( +version: Callable[..., Option] = partial( Option, "-V", "--version", dest="version", action="store_true", help="Show version and exit.", -) # type: Callable[..., Option] +) -quiet = partial( +quiet: Callable[..., Option] = partial( Option, "-q", "--quiet", @@ -219,23 +219,19 @@ quiet = partial( " times (corresponding to WARNING, ERROR, and CRITICAL logging" " levels)." ), -) # type: Callable[..., Option] +) -progress_bar = partial( +progress_bar: Callable[..., Option] = partial( Option, "--progress-bar", dest="progress_bar", type="choice", - choices=list(BAR_TYPES.keys()), + choices=["on", "off"], default="on", - help=( - "Specify type of progress to be displayed [" - + "|".join(BAR_TYPES.keys()) - + "] (default: %default)" - ), -) # type: Callable[..., Option] + help="Specify whether the progress bar should be used [on, off] (default: on)", +) -log = partial( +log: Callable[..., Option] = partial( PipOption, "--log", "--log-file", @@ -244,9 +240,9 @@ log = partial( metavar="path", type="path", help="Path to a verbose appending log.", -) # type: Callable[..., Option] +) -no_input = partial( +no_input: Callable[..., Option] = partial( Option, # Don't ask for input "--no-input", @@ -254,18 +250,31 @@ no_input = partial( action="store_true", default=False, help="Disable prompting for input.", -) # type: Callable[..., Option] +) + +keyring_provider: Callable[..., Option] = partial( + Option, + "--keyring-provider", + dest="keyring_provider", + choices=["auto", "disabled", "import", "subprocess"], + default="auto", + help=( + "Enable the credential lookup via the keyring library if user input is allowed." + " Specify which mechanism to use [disabled, import, subprocess]." 
+ " (default: disabled)" + ), +) -proxy = partial( +proxy: Callable[..., Option] = partial( Option, "--proxy", dest="proxy", type="str", default="", - help="Specify a proxy in the form [user:passwd@]proxy.server:port.", -) # type: Callable[..., Option] + help="Specify a proxy in the form scheme://[user:passwd@]proxy.server:port.", +) -retries = partial( +retries: Callable[..., Option] = partial( Option, "--retries", dest="retries", @@ -273,9 +282,9 @@ retries = partial( default=5, help="Maximum number of retries each connection should attempt " "(default %default times).", -) # type: Callable[..., Option] +) -timeout = partial( +timeout: Callable[..., Option] = partial( Option, "--timeout", "--default-timeout", @@ -284,11 +293,10 @@ timeout = partial( type="float", default=15, help="Set the socket timeout (default %default seconds).", -) # type: Callable[..., Option] +) -def exists_action(): - # type: () -> Option +def exists_action() -> Option: return Option( # Option when path already exist "--exists-action", @@ -303,7 +311,7 @@ def exists_action(): ) -cert = partial( +cert: Callable[..., Option] = partial( PipOption, "--cert", dest="cert", @@ -315,9 +323,9 @@ cert = partial( "See 'SSL Certificate Verification' in pip documentation " "for more information." ), -) # type: Callable[..., Option] +) -client_cert = partial( +client_cert: Callable[..., Option] = partial( PipOption, "--client-cert", dest="client_cert", @@ -326,9 +334,9 @@ client_cert = partial( metavar="path", help="Path to SSL client certificate, a single file containing the " "private key and the certificate in PEM format.", -) # type: Callable[..., Option] +) -index_url = partial( +index_url: Callable[..., Option] = partial( Option, "-i", "--index-url", @@ -340,11 +348,10 @@ index_url = partial( "This should point to a repository compliant with PEP 503 " "(the simple repository API) or a local directory laid out " "in the same format.", -) # type: Callable[..., Option] +) -def extra_index_url(): - # type: () -> Option +def extra_index_url() -> Option: return Option( "--extra-index-url", dest="extra_index_urls", @@ -357,18 +364,17 @@ def extra_index_url(): ) -no_index = partial( +no_index: Callable[..., Option] = partial( Option, "--no-index", dest="no_index", action="store_true", default=False, help="Ignore package index (only looking at --find-links URLs instead).", -) # type: Callable[..., Option] +) -def find_links(): - # type: () -> Option +def find_links() -> Option: return Option( "-f", "--find-links", @@ -378,14 +384,13 @@ def find_links(): metavar="url", help="If a URL or path to an html file, then parse for links to " "archives such as sdist (.tar.gz) or wheel (.whl) files. " - "If a local path or file:// URL that's a directory, " + "If a local path or file:// URL that's a directory, " "then look for archives in the directory listing. 
" "Links to VCS project URLs are not supported.", ) -def trusted_host(): - # type: () -> Option +def trusted_host() -> Option: return Option( "--trusted-host", dest="trusted_hosts", @@ -397,8 +402,7 @@ def trusted_host(): ) -def constraints(): - # type: () -> Option +def constraints() -> Option: return Option( "-c", "--constraint", @@ -411,8 +415,7 @@ def constraints(): ) -def requirements(): - # type: () -> Option +def requirements() -> Option: return Option( "-r", "--requirement", @@ -425,8 +428,7 @@ def requirements(): ) -def editable(): - # type: () -> Option +def editable() -> Option: return Option( "-e", "--editable", @@ -441,13 +443,12 @@ def editable(): ) -def _handle_src(option, opt_str, value, parser): - # type: (Option, str, str, OptionParser) -> None +def _handle_src(option: Option, opt_str: str, value: str, parser: OptionParser) -> None: value = os.path.abspath(value) setattr(parser.values, option.dest, value) -src = partial( +src: Callable[..., Option] = partial( PipOption, "--src", "--source", @@ -462,17 +463,17 @@ src = partial( help="Directory to check out editable projects into. " 'The default in a virtualenv is "<venv path>/src". ' 'The default for global installs is "<current dir>/src".', -) # type: Callable[..., Option] +) -def _get_format_control(values, option): - # type: (Values, Option) -> Any +def _get_format_control(values: Values, option: Option) -> Any: """Get a format_control object.""" return getattr(values, option.dest) -def _handle_no_binary(option, opt_str, value, parser): - # type: (Option, str, str, OptionParser) -> None +def _handle_no_binary( + option: Option, opt_str: str, value: str, parser: OptionParser +) -> None: existing = _get_format_control(parser.values, option) FormatControl.handle_mutual_excludes( value, @@ -481,8 +482,9 @@ def _handle_no_binary(option, opt_str, value, parser): ) -def _handle_only_binary(option, opt_str, value, parser): - # type: (Option, str, str, OptionParser) -> None +def _handle_only_binary( + option: Option, opt_str: str, value: str, parser: OptionParser +) -> None: existing = _get_format_control(parser.values, option) FormatControl.handle_mutual_excludes( value, @@ -491,8 +493,7 @@ def _handle_only_binary(option, opt_str, value, parser): ) -def no_binary(): - # type: () -> Option +def no_binary() -> Option: format_control = FormatControl(set(), set()) return Option( "--no-binary", @@ -510,8 +511,7 @@ def no_binary(): ) -def only_binary(): - # type: () -> Option +def only_binary() -> Option: format_control = FormatControl(set(), set()) return Option( "--only-binary", @@ -529,7 +529,7 @@ def only_binary(): ) -platforms = partial( +platforms: Callable[..., Option] = partial( Option, "--platform", dest="platforms", @@ -541,12 +541,11 @@ platforms = partial( "platform of the running system. Use this option multiple times to " "specify multiple platforms supported by the target interpreter." ), -) # type: Callable[..., Option] +) # This was made a separate function for unit-testing purposes. -def _convert_python_version(value): - # type: (str) -> Tuple[Tuple[int, ...], Optional[str]] +def _convert_python_version(value: str) -> Tuple[Tuple[int, ...], Optional[str]]: """ Convert a version string like "3", "37", or "3.7.3" into a tuple of ints. 
@@ -575,8 +574,9 @@ def _convert_python_version(value): return (version_info, None) -def _handle_python_version(option, opt_str, value, parser): - # type: (Option, str, str, OptionParser) -> None +def _handle_python_version( + option: Option, opt_str: str, value: str, parser: OptionParser +) -> None: """ Handle a provided --python-version value. """ @@ -591,7 +591,7 @@ def _handle_python_version(option, opt_str, value, parser): parser.values.python_version = version_info -python_version = partial( +python_version: Callable[..., Option] = partial( Option, "--python-version", dest="python_version", @@ -609,10 +609,10 @@ python_version = partial( version can also be given as a string without dots (e.g. "37" for 3.7.0). """ ), -) # type: Callable[..., Option] +) -implementation = partial( +implementation: Callable[..., Option] = partial( Option, "--implementation", dest="implementation", @@ -625,10 +625,10 @@ implementation = partial( "interpreter implementation is used. Use 'py' to force " "implementation-agnostic wheels." ), -) # type: Callable[..., Option] +) -abis = partial( +abis: Callable[..., Option] = partial( Option, "--abi", dest="abis", @@ -643,19 +643,17 @@ abis = partial( "--implementation, --platform, and --python-version when using this " "option." ), -) # type: Callable[..., Option] +) -def add_target_python_options(cmd_opts): - # type: (OptionGroup) -> None +def add_target_python_options(cmd_opts: OptionGroup) -> None: cmd_opts.add_option(platforms()) cmd_opts.add_option(python_version()) cmd_opts.add_option(implementation()) cmd_opts.add_option(abis()) -def make_target_python(options): - # type: (Values) -> TargetPython +def make_target_python(options: Values) -> TargetPython: target_python = TargetPython( platforms=options.platforms, py_version_info=options.python_version, @@ -666,8 +664,7 @@ def make_target_python(options): return target_python -def prefer_binary(): - # type: () -> Option +def prefer_binary() -> Option: return Option( "--prefer-binary", dest="prefer_binary", @@ -677,7 +674,7 @@ def prefer_binary(): ) -cache_dir = partial( +cache_dir: Callable[..., Option] = partial( PipOption, "--cache-dir", dest="cache_dir", @@ -685,11 +682,12 @@ cache_dir = partial( metavar="dir", type="path", help="Store the cache data in <dir>.", -) # type: Callable[..., Option] +) -def _handle_no_cache_dir(option, opt, value, parser): - # type: (Option, str, str, OptionParser) -> None +def _handle_no_cache_dir( + option: Option, opt: str, value: str, parser: OptionParser +) -> None: """ Process a value provided for the --no-cache-dir option. 
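
# The options above follow two cmdoptions conventions: each flag is a
# functools.partial "factory" that returns a fresh optparse.Option when
# called, and value munging happens in action="callback" handlers that store
# their result on parser.values. A condensed standalone sketch of both,
# using a simplified --src as the example:
import os
from functools import partial
from optparse import Option, OptionParser

def _handle_src(option: Option, opt_str: str, value: str, parser: OptionParser) -> None:
    # Normalize to an absolute path before storing, as pip's handler does.
    setattr(parser.values, option.dest, os.path.abspath(value))

src = partial(
    Option,
    "--src",
    dest="src_dir",
    type="str",
    action="callback",
    callback=_handle_src,
    help="Directory to check out editable projects into.",
)

parser = OptionParser()
parser.add_option(src())  # each call to the factory yields a fresh Option
options, _ = parser.parse_args(["--src", "some/dir"])
assert os.path.isabs(options.src_dir)
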
@@ -716,16 +714,16 @@ def _handle_no_cache_dir(option, opt, value, parser):
     parser.values.cache_dir = False

-no_cache = partial(
+no_cache: Callable[..., Option] = partial(
     Option,
     "--no-cache-dir",
     dest="cache_dir",
     action="callback",
     callback=_handle_no_cache_dir,
     help="Disable the cache.",
-)  # type: Callable[..., Option]
+)

-no_deps = partial(
+no_deps: Callable[..., Option] = partial(
     Option,
     "--no-deps",
     "--no-dependencies",
@@ -733,29 +731,17 @@ no_deps = partial(
     action="store_true",
     default=False,
     help="Don't install package dependencies.",
-)  # type: Callable[..., Option]
-
-build_dir = partial(
-    PipOption,
-    "-b",
-    "--build",
-    "--build-dir",
-    "--build-directory",
-    dest="build_dir",
-    type="path",
-    metavar="dir",
-    help=SUPPRESS_HELP,
-)  # type: Callable[..., Option]
+)

-ignore_requires_python = partial(
+ignore_requires_python: Callable[..., Option] = partial(
     Option,
     "--ignore-requires-python",
     dest="ignore_requires_python",
     action="store_true",
     help="Ignore the Requires-Python information.",
-)  # type: Callable[..., Option]
+)

-no_build_isolation = partial(
+no_build_isolation: Callable[..., Option] = partial(
     Option,
     "--no-build-isolation",
     dest="build_isolation",
@@ -764,11 +750,21 @@ no_build_isolation = partial(
     help="Disable isolation when building a modern source distribution. "
     "Build dependencies specified by PEP 518 must be already installed "
     "if this option is used.",
-)  # type: Callable[..., Option]
+)
+
+check_build_deps: Callable[..., Option] = partial(
+    Option,
+    "--check-build-dependencies",
+    dest="check_build_deps",
+    action="store_true",
+    default=False,
+    help="Check the build dependencies when PEP 517 is used.",
+)

-def _handle_no_use_pep517(option, opt, value, parser):
-    # type: (Option, str, str, OptionParser) -> None
+def _handle_no_use_pep517(
+    option: Option, opt: str, value: str, parser: OptionParser
+) -> None:
     """
     Process a value provided for the --no-use-pep517 option.

@@ -787,11 +783,21 @@ def _handle_no_use_pep517(option, opt, value, parser):
     """
     raise_option_error(parser, option=option, msg=msg)

+    # If the user doesn't wish to use PEP 517, check that setuptools and wheel
+    # are installed and raise an error if they are not.
+    packages = ("setuptools", "wheel")
+    if not all(importlib.util.find_spec(package) for package in packages):
+        msg = (
+            f"It is not possible to use --no-use-pep517 "
+            f"without {' and '.join(packages)} installed."
+        )
+        raise_option_error(parser, option=option, msg=msg)
+
     # Otherwise, --no-use-pep517 was passed via the command-line.
parser.values.use_pep517 = False -use_pep517 = partial( +use_pep517: Any = partial( Option, "--use-pep517", dest="use_pep517", @@ -799,9 +805,9 @@ use_pep517 = partial( default=None, help="Use PEP 517 for building source distributions " "(use --no-use-pep517 to force legacy behaviour).", -) # type: Any +) -no_use_pep517 = partial( +no_use_pep517: Any = partial( Option, "--no-use-pep517", dest="use_pep517", @@ -809,31 +815,52 @@ no_use_pep517 = partial( callback=_handle_no_use_pep517, default=None, help=SUPPRESS_HELP, -) # type: Any - -install_options = partial( +) + + +def _handle_config_settings( + option: Option, opt_str: str, value: str, parser: OptionParser +) -> None: + key, sep, val = value.partition("=") + if sep != "=": + parser.error(f"Arguments to {opt_str} must be of the form KEY=VAL") # noqa + dest = getattr(parser.values, option.dest) + if dest is None: + dest = {} + setattr(parser.values, option.dest, dest) + if key in dest: + if isinstance(dest[key], list): + dest[key].append(val) + else: + dest[key] = [dest[key], val] + else: + dest[key] = val + + +config_settings: Callable[..., Option] = partial( Option, - "--install-option", - dest="install_options", - action="append", - metavar="options", - help="Extra arguments to be supplied to the setup.py install " - 'command (use like --install-option="--install-scripts=/usr/local/' - 'bin"). Use multiple --install-option options to pass multiple ' - "options to setup.py install. If you are using an option with a " - "directory path, be sure to use absolute path.", -) # type: Callable[..., Option] - -build_options = partial( + "-C", + "--config-settings", + dest="config_settings", + type=str, + action="callback", + callback=_handle_config_settings, + metavar="settings", + help="Configuration settings to be passed to the PEP 517 build backend. " + "Settings take the form KEY=VALUE. Use multiple --config-settings options " + "to pass multiple keys to the backend.", +) + +build_options: Callable[..., Option] = partial( Option, "--build-option", dest="build_options", metavar="options", action="append", help="Extra arguments to be supplied to 'setup.py bdist_wheel'.", -) # type: Callable[..., Option] +) -global_options = partial( +global_options: Callable[..., Option] = partial( Option, "--global-option", dest="global_options", @@ -841,26 +868,26 @@ global_options = partial( metavar="options", help="Extra global options to be supplied to the setup.py " "call before the install or bdist_wheel command.", -) # type: Callable[..., Option] +) -no_clean = partial( +no_clean: Callable[..., Option] = partial( Option, "--no-clean", action="store_true", default=False, help="Don't clean up build directories.", -) # type: Callable[..., Option] +) -pre = partial( +pre: Callable[..., Option] = partial( Option, "--pre", action="store_true", default=False, help="Include pre-release and development versions. By default, " "pip only finds stable versions.", -) # type: Callable[..., Option] +) -disable_pip_version_check = partial( +disable_pip_version_check: Callable[..., Option] = partial( Option, "--disable-pip-version-check", dest="disable_pip_version_check", @@ -868,11 +895,21 @@ disable_pip_version_check = partial( default=False, help="Don't periodically check PyPI to determine whether a new version " "of pip is available for download. 
Implied with --no-index.", -) # type: Callable[..., Option] +) + +root_user_action: Callable[..., Option] = partial( + Option, + "--root-user-action", + dest="root_user_action", + default="warn", + choices=["warn", "ignore"], + help="Action if pip is run as a root user. By default, a warning message is shown.", +) -def _handle_merge_hash(option, opt_str, value, parser): - # type: (Option, str, str, OptionParser) -> None +def _handle_merge_hash( + option: Option, opt_str: str, value: str, parser: OptionParser +) -> None: """Given a value spelled "algo:digest", append the digest to a list pointed to in a dict by the algo name.""" if not parser.values.hashes: @@ -894,7 +931,7 @@ def _handle_merge_hash(option, opt_str, value, parser): parser.values.hashes.setdefault(algo, []).append(digest) -hash = partial( +hash: Callable[..., Option] = partial( Option, "--hash", # Hash values eventually end up in InstallRequirement.hashes due to @@ -905,10 +942,10 @@ hash = partial( type="string", help="Verify that the package's archive matches this " "hash before installing. Example: --hash=sha256:abcdef...", -) # type: Callable[..., Option] +) -require_hashes = partial( +require_hashes: Callable[..., Option] = partial( Option, "--require-hashes", dest="require_hashes", @@ -917,10 +954,10 @@ require_hashes = partial( help="Require a hash to check each requirement against, for " "repeatable installs. This option is implied when any package in a " "requirements file has a --hash option.", -) # type: Callable[..., Option] +) -list_path = partial( +list_path: Callable[..., Option] = partial( PipOption, "--path", dest="path", @@ -928,16 +965,15 @@ list_path = partial( action="append", help="Restrict to the specified installation path for listing " "packages (can be used multiple times).", -) # type: Callable[..., Option] +) -def check_list_path_option(options): - # type: (Values) -> None +def check_list_path_option(options: Values) -> None: if options.path and (options.user or options.local): raise CommandError("Cannot combine '--path' with '--user' or '--local'") -list_exclude = partial( +list_exclude: Callable[..., Option] = partial( PipOption, "--exclude", dest="excludes", @@ -945,57 +981,71 @@ list_exclude = partial( metavar="package", type="package_name", help="Exclude specified package from the output", -) # type: Callable[..., Option] +) -no_python_version_warning = partial( +no_python_version_warning: Callable[..., Option] = partial( Option, "--no-python-version-warning", dest="no_python_version_warning", action="store_true", default=False, help="Silence deprecation warnings for upcoming unsupported Pythons.", -) # type: Callable[..., Option] +) -use_new_feature = partial( +# Features that are now always on. A warning is printed if they are used. 
+ALWAYS_ENABLED_FEATURES = [ + "no-binary-enable-wheel-cache", # always on since 23.1 +] + +use_new_feature: Callable[..., Option] = partial( Option, "--use-feature", dest="features_enabled", metavar="feature", action="append", default=[], - choices=["2020-resolver", "fast-deps", "in-tree-build"], + choices=[ + "fast-deps", + "truststore", + ] + + ALWAYS_ENABLED_FEATURES, help="Enable new functionality, that may be backward incompatible.", -) # type: Callable[..., Option] +) -use_deprecated_feature = partial( +use_deprecated_feature: Callable[..., Option] = partial( Option, "--use-deprecated", dest="deprecated_features_enabled", metavar="feature", action="append", default=[], - choices=["legacy-resolver"], + choices=[ + "legacy-resolver", + ], help=("Enable deprecated functionality, that will be removed in the future."), -) # type: Callable[..., Option] +) ########## # groups # ########## -general_group = { +general_group: Dict[str, Any] = { "name": "General Options", "options": [ help_, + debug_mode, isolated_mode, require_virtualenv, + python, verbose, version, quiet, log, no_input, + keyring_provider, proxy, retries, timeout, @@ -1011,9 +1061,9 @@ general_group = { use_new_feature, use_deprecated_feature, ], -} # type: Dict[str, Any] +} -index_group = { +index_group: Dict[str, Any] = { "name": "Package Index Options", "options": [ index_url, @@ -1021,4 +1071,4 @@ index_group = { no_index, find_links, ], -} # type: Dict[str, Any] +} diff --git a/env/Lib/site-packages/pip/_internal/cli/command_context.py b/env/Lib/site-packages/pip/_internal/cli/command_context.py index 375a2e3660b7a65ce5dd38d79a61799169c4242d..139995ac3f109a82664e4913f7ebc32ecf7617e1 100644 --- a/env/Lib/site-packages/pip/_internal/cli/command_context.py +++ b/env/Lib/site-packages/pip/_internal/cli/command_context.py @@ -1,19 +1,17 @@ from contextlib import ExitStack, contextmanager -from typing import ContextManager, Iterator, TypeVar +from typing import ContextManager, Generator, TypeVar _T = TypeVar("_T", covariant=True) class CommandContextMixIn: - def __init__(self): - # type: () -> None + def __init__(self) -> None: super().__init__() self._in_main_context = False self._main_context = ExitStack() @contextmanager - def main_context(self): - # type: () -> Iterator[None] + def main_context(self) -> Generator[None, None, None]: assert not self._in_main_context self._in_main_context = True @@ -23,8 +21,7 @@ class CommandContextMixIn: finally: self._in_main_context = False - def enter_context(self, context_provider): - # type: (ContextManager[_T]) -> _T + def enter_context(self, context_provider: ContextManager[_T]) -> _T: assert self._in_main_context return self._main_context.enter_context(context_provider) diff --git a/env/Lib/site-packages/pip/_internal/cli/main.py b/env/Lib/site-packages/pip/_internal/cli/main.py index 7ae074b59d5d781e9a058a9e13f25d018c3d02ae..7e061f5b39081f39e9f4fa2a0e88aec0e0a3da79 100644 --- a/env/Lib/site-packages/pip/_internal/cli/main.py +++ b/env/Lib/site-packages/pip/_internal/cli/main.py @@ -4,6 +4,7 @@ import locale import logging import os import sys +import warnings from typing import List, Optional from pip._internal.cli.autocompletion import autocomplete @@ -42,11 +43,18 @@ logger = logging.getLogger(__name__) # main, this should not be an issue in practice. 
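
# A usage sketch of the CommandContextMixIn diffed above: anything passed to
# enter_context() inside main_context() is torn down when the outer context
# exits, which is how get_default_session() ties a session's lifetime to the
# command. The Demo class and the TemporaryFile resource are illustrative
# stand-ins, not pip code.
import tempfile

from pip._internal.cli.command_context import CommandContextMixIn

class Demo(CommandContextMixIn):
    def run(self) -> None:
        # The file is registered on the shared ExitStack.
        f = self.enter_context(tempfile.TemporaryFile())
        f.write(b"scratch data")

demo = Demo()
with demo.main_context():
    demo.run()  # the file stays open for the rest of the context
# leaving main_context() closed the temporary file
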
-def main(args=None): - # type: (Optional[List[str]]) -> int +def main(args: Optional[List[str]] = None) -> int: if args is None: args = sys.argv[1:] + # Suppress the pkg_resources deprecation warning + # Note - we use a module of .*pkg_resources to cover + # the normal case (pip._vendor.pkg_resources) and the + # devendored case (a bare pkg_resources) + warnings.filterwarnings( + action="ignore", category=DeprecationWarning, module=".*pkg_resources" + ) + # Configure our deprecation warnings to be sent through loggers deprecation.install_warning_logger() diff --git a/env/Lib/site-packages/pip/_internal/cli/main_parser.py b/env/Lib/site-packages/pip/_internal/cli/main_parser.py index d0f58fe421b86e5f12e0092b6c0d9bb946da5890..5ade356b9c2f3e375bf598635627870f248c0cc3 100644 --- a/env/Lib/site-packages/pip/_internal/cli/main_parser.py +++ b/env/Lib/site-packages/pip/_internal/cli/main_parser.py @@ -2,9 +2,11 @@ """ import os +import subprocess import sys -from typing import List, Tuple +from typing import List, Optional, Tuple +from pip._internal.build_env import get_runnable_pip from pip._internal.cli import cmdoptions from pip._internal.cli.parser import ConfigOptionParser, UpdatingDefaultsHelpFormatter from pip._internal.commands import commands_dict, get_similar_commands @@ -14,8 +16,7 @@ from pip._internal.utils.misc import get_pip_version, get_prog __all__ = ["create_main_parser", "parse_command"] -def create_main_parser(): - # type: () -> ConfigOptionParser +def create_main_parser() -> ConfigOptionParser: """Creates and returns the main parser for pip's CLI""" parser = ConfigOptionParser( @@ -46,8 +47,26 @@ def create_main_parser(): return parser -def parse_command(args): - # type: (List[str]) -> Tuple[str, List[str]] +def identify_python_interpreter(python: str) -> Optional[str]: + # If the named file exists, use it. + # If it's a directory, assume it's a virtual environment and + # look for the environment's Python executable. + if os.path.exists(python): + if os.path.isdir(python): + # bin/python for Unix, Scripts/python.exe for Windows + # Try both in case of odd cases like cygwin. + for exe in ("bin/python", "Scripts/python.exe"): + py = os.path.join(python, exe) + if os.path.exists(py): + return py + else: + return python + + # Could not find the interpreter specified + return None + + +def parse_command(args: List[str]) -> Tuple[str, List[str]]: parser = create_main_parser() # Note: parser calls disable_interspersed_args(), so the result of this @@ -59,6 +78,32 @@ def parse_command(args): # args_else: ['install', '--user', 'INITools'] general_options, args_else = parser.parse_args(args) + # --python + if general_options.python and "_PIP_RUNNING_IN_SUBPROCESS" not in os.environ: + # Re-invoke pip using the specified Python interpreter + interpreter = identify_python_interpreter(general_options.python) + if interpreter is None: + raise CommandError( + f"Could not locate Python interpreter {general_options.python}" + ) + + pip_cmd = [ + interpreter, + get_runnable_pip(), + ] + pip_cmd.extend(args) + + # Set a flag so the child doesn't re-invoke itself, causing + # an infinite loop. 
+ os.environ["_PIP_RUNNING_IN_SUBPROCESS"] = "1" + returncode = 0 + try: + proc = subprocess.run(pip_cmd) + returncode = proc.returncode + except (subprocess.SubprocessError, OSError) as exc: + raise CommandError(f"Failed to run pip under {interpreter}: {exc}") + sys.exit(returncode) + # --version if general_options.version: sys.stdout.write(parser.version) diff --git a/env/Lib/site-packages/pip/_internal/cli/parser.py b/env/Lib/site-packages/pip/_internal/cli/parser.py index 16523c5a19c191d1a5219eca99a9ef78dac6320d..c762cf2781d460288674314959c727e860aad067 100644 --- a/env/Lib/site-packages/pip/_internal/cli/parser.py +++ b/env/Lib/site-packages/pip/_internal/cli/parser.py @@ -6,7 +6,7 @@ import shutil import sys import textwrap from contextlib import suppress -from typing import Any, Dict, Iterator, List, Tuple +from typing import Any, Dict, Generator, List, Tuple from pip._internal.cli.status_codes import UNKNOWN_ERROR from pip._internal.configuration import Configuration, ConfigurationError @@ -18,20 +18,19 @@ logger = logging.getLogger(__name__) class PrettyHelpFormatter(optparse.IndentedHelpFormatter): """A prettier/less verbose help formatter for optparse.""" - def __init__(self, *args, **kwargs): - # type: (*Any, **Any) -> None + def __init__(self, *args: Any, **kwargs: Any) -> None: # help position must be aligned with __init__.parseopts.description kwargs["max_help_position"] = 30 kwargs["indent_increment"] = 1 kwargs["width"] = shutil.get_terminal_size()[0] - 2 super().__init__(*args, **kwargs) - def format_option_strings(self, option): - # type: (optparse.Option) -> str + def format_option_strings(self, option: optparse.Option) -> str: return self._format_option_strings(option) - def _format_option_strings(self, option, mvarfmt=" <{}>", optsep=", "): - # type: (optparse.Option, str, str) -> str + def _format_option_strings( + self, option: optparse.Option, mvarfmt: str = " <{}>", optsep: str = ", " + ) -> str: """ Return a comma-separated list of option strings and metavars. @@ -55,14 +54,12 @@ class PrettyHelpFormatter(optparse.IndentedHelpFormatter): return "".join(opts) - def format_heading(self, heading): - # type: (str) -> str + def format_heading(self, heading: str) -> str: if heading == "Options": return "" return heading + ":\n" - def format_usage(self, usage): - # type: (str) -> str + def format_usage(self, usage: str) -> str: """ Ensure there is only one newline between usage and the first heading if there is no description. 
@@ -70,8 +67,7 @@ class PrettyHelpFormatter(optparse.IndentedHelpFormatter): msg = "\nUsage: {}\n".format(self.indent_lines(textwrap.dedent(usage), " ")) return msg - def format_description(self, description): - # type: (str) -> str + def format_description(self, description: str) -> str: # leave full control over description to us if description: if hasattr(self.parser, "main"): @@ -89,16 +85,14 @@ class PrettyHelpFormatter(optparse.IndentedHelpFormatter): else: return "" - def format_epilog(self, epilog): - # type: (str) -> str + def format_epilog(self, epilog: str) -> str: # leave full control over epilog to us if epilog: return epilog else: return "" - def indent_lines(self, text, indent): - # type: (str, str) -> str + def indent_lines(self, text: str, indent: str) -> str: new_lines = [indent + line for line in text.split("\n")] return "\n".join(new_lines) @@ -112,8 +106,7 @@ class UpdatingDefaultsHelpFormatter(PrettyHelpFormatter): Also redact auth from url type options """ - def expand_default(self, option): - # type: (optparse.Option) -> str + def expand_default(self, option: optparse.Option) -> str: default_values = None if self.parser is not None: assert isinstance(self.parser, ConfigOptionParser) @@ -137,8 +130,9 @@ class UpdatingDefaultsHelpFormatter(PrettyHelpFormatter): class CustomOptionParser(optparse.OptionParser): - def insert_option_group(self, idx, *args, **kwargs): - # type: (int, Any, Any) -> optparse.OptionGroup + def insert_option_group( + self, idx: int, *args: Any, **kwargs: Any + ) -> optparse.OptionGroup: """Insert an OptionGroup at a given position.""" group = self.add_option_group(*args, **kwargs) @@ -148,8 +142,7 @@ class CustomOptionParser(optparse.OptionParser): return group @property - def option_list_all(self): - # type: () -> List[optparse.Option] + def option_list_all(self) -> List[optparse.Option]: """Get a list of all options, including those in option groups.""" res = self.option_list[:] for i in self.option_groups: @@ -164,35 +157,34 @@ class ConfigOptionParser(CustomOptionParser): def __init__( self, - *args, # type: Any - name, # type: str - isolated=False, # type: bool - **kwargs, # type: Any - ): - # type: (...) -> None + *args: Any, + name: str, + isolated: bool = False, + **kwargs: Any, + ) -> None: self.name = name self.config = Configuration(isolated) assert self.name super().__init__(*args, **kwargs) - def check_default(self, option, key, val): - # type: (optparse.Option, str, Any) -> Any + def check_default(self, option: optparse.Option, key: str, val: Any) -> Any: try: return option.check_value(key, val) except optparse.OptionValueError as exc: print(f"An error occurred during configuration: {exc}") sys.exit(3) - def _get_ordered_configuration_items(self): - # type: () -> Iterator[Tuple[str, Any]] + def _get_ordered_configuration_items( + self, + ) -> Generator[Tuple[str, Any], None, None]: # Configuration gives keys in an unordered manner. Order them. 
override_order = ["global", self.name, ":env:"] # Pool the options into different groups - section_items = { + section_items: Dict[str, List[Tuple[str, Any]]] = { name: [] for name in override_order - } # type: Dict[str, List[Tuple[str, Any]]] + } for section_key, val in self.config.items(): # ignore empty values if not val: @@ -211,8 +203,7 @@ class ConfigOptionParser(CustomOptionParser): for key, val in section_items[section]: yield key, val - def _update_defaults(self, defaults): - # type: (Dict[str, Any]) -> Dict[str, Any] + def _update_defaults(self, defaults: Dict[str, Any]) -> Dict[str, Any]: """Updates the given defaults with values from the config files and the environ. Does a little special handling for certain types of options (lists).""" @@ -276,8 +267,7 @@ class ConfigOptionParser(CustomOptionParser): self.values = None return defaults - def get_default_values(self): - # type: () -> optparse.Values + def get_default_values(self) -> optparse.Values: """Overriding to make updating the defaults after instantiation of the option parser possible, _update_defaults() does the dirty work.""" if not self.process_default_values: @@ -299,7 +289,6 @@ class ConfigOptionParser(CustomOptionParser): defaults[option.dest] = option.check_value(opt_str, default) return optparse.Values(defaults) - def error(self, msg): - # type: (str) -> None + def error(self, msg: str) -> None: self.print_usage(sys.stderr) self.exit(UNKNOWN_ERROR, f"{msg}\n") diff --git a/env/Lib/site-packages/pip/_internal/cli/progress_bars.py b/env/Lib/site-packages/pip/_internal/cli/progress_bars.py index 3064c85697bdd5536337aeb575af1a493ae942a9..0ad14031ca50c2c348dc0daa8fe7b38af532c0f5 100644 --- a/env/Lib/site-packages/pip/_internal/cli/progress_bars.py +++ b/env/Lib/site-packages/pip/_internal/cli/progress_bars.py @@ -1,261 +1,68 @@ -import itertools -import sys -from signal import SIGINT, default_int_handler, signal -from typing import Any, Dict, List +import functools +from typing import Callable, Generator, Iterable, Iterator, Optional, Tuple + +from pip._vendor.rich.progress import ( + BarColumn, + DownloadColumn, + FileSizeColumn, + Progress, + ProgressColumn, + SpinnerColumn, + TextColumn, + TimeElapsedColumn, + TimeRemainingColumn, + TransferSpeedColumn, +) -from pip._vendor.progress.bar import Bar, FillingCirclesBar, IncrementalBar -from pip._vendor.progress.spinner import Spinner - -from pip._internal.utils.compat import WINDOWS from pip._internal.utils.logging import get_indentation -from pip._internal.utils.misc import format_size - -try: - from pip._vendor import colorama -# Lots of different errors can come from this, including SystemError and -# ImportError. -except Exception: - colorama = None - - -def _select_progress_class(preferred, fallback): - # type: (Bar, Bar) -> Bar - encoding = getattr(preferred.file, "encoding", None) - # If we don't know what encoding this file is in, then we'll just assume - # that it doesn't support unicode and use the ASCII bar. - if not encoding: - return fallback - - # Collect all of the possible characters we want to use with the preferred - # bar. - characters = [ - getattr(preferred, "empty_fill", ""), - getattr(preferred, "fill", ""), - ] - characters += list(getattr(preferred, "phases", [])) - - # Try to decode the characters we're using for the bar using the encoding - # of the given file, if this works then we'll assume that we can use the - # fancier bar and if not we'll fall back to the plaintext bar. 
- try: - "".join(characters).encode(encoding) - except UnicodeEncodeError: - return fallback +DownloadProgressRenderer = Callable[[Iterable[bytes]], Iterator[bytes]] + + +def _rich_progress_bar( + iterable: Iterable[bytes], + *, + bar_type: str, + size: int, +) -> Generator[bytes, None, None]: + assert bar_type == "on", "This should only be used in the default mode." + + if not size: + total = float("inf") + columns: Tuple[ProgressColumn, ...] = ( + TextColumn("[progress.description]{task.description}"), + SpinnerColumn("line", speed=1.5), + FileSizeColumn(), + TransferSpeedColumn(), + TimeElapsedColumn(), + ) else: - return preferred - - -_BaseBar = _select_progress_class(IncrementalBar, Bar) # type: Any - - -class InterruptibleMixin: - """ - Helper to ensure that self.finish() gets called on keyboard interrupt. - - This allows downloads to be interrupted without leaving temporary state - (like hidden cursors) behind. - - This class is similar to the progress library's existing SigIntMixin - helper, but as of version 1.2, that helper has the following problems: - - 1. It calls sys.exit(). - 2. It discards the existing SIGINT handler completely. - 3. It leaves its own handler in place even after an uninterrupted finish, - which will have unexpected delayed effects if the user triggers an - unrelated keyboard interrupt some time after a progress-displaying - download has already completed, for example. - """ - - def __init__(self, *args, **kwargs): - # type: (List[Any], Dict[Any, Any]) -> None - """ - Save the original SIGINT handler for later. - """ - # https://github.com/python/mypy/issues/5887 - super().__init__(*args, **kwargs) # type: ignore - - self.original_handler = signal(SIGINT, self.handle_sigint) - - # If signal() returns None, the previous handler was not installed from - # Python, and we cannot restore it. This probably should not happen, - # but if it does, we must restore something sensible instead, at least. - # The least bad option should be Python's default SIGINT handler, which - # just raises KeyboardInterrupt. - if self.original_handler is None: - self.original_handler = default_int_handler - - def finish(self): - # type: () -> None - """ - Restore the original SIGINT handler after finishing. - - This should happen regardless of whether the progress display finishes - normally, or gets interrupted. - """ - super().finish() # type: ignore - signal(SIGINT, self.original_handler) - - def handle_sigint(self, signum, frame): # type: ignore - """ - Call self.finish() before delegating to the original SIGINT handler. - - This handler should only be in place while the progress display is - active. - """ - self.finish() - self.original_handler(signum, frame) - - -class SilentBar(Bar): - def update(self): - # type: () -> None - pass - - -class BlueEmojiBar(IncrementalBar): - - suffix = "%(percent)d%%" - bar_prefix = " " - bar_suffix = " " - phases = ("\U0001F539", "\U0001F537", "\U0001F535") - - -class DownloadProgressMixin: - def __init__(self, *args, **kwargs): - # type: (List[Any], Dict[Any, Any]) -> None - # https://github.com/python/mypy/issues/5887 - super().__init__(*args, **kwargs) # type: ignore - self.message = (" " * (get_indentation() + 2)) + self.message # type: str - - @property - def downloaded(self): - # type: () -> str - return format_size(self.index) # type: ignore - - @property - def download_speed(self): - # type: () -> str - # Avoid zero division errors... - if self.avg == 0.0: # type: ignore - return "..." 
- return format_size(1 / self.avg) + "/s" # type: ignore - - @property - def pretty_eta(self): - # type: () -> str - if self.eta: # type: ignore - return f"eta {self.eta_td}" # type: ignore - return "" - - def iter(self, it): # type: ignore - for x in it: - yield x - # B305 is incorrectly raised here - # https://github.com/PyCQA/flake8-bugbear/issues/59 - self.next(len(x)) # noqa: B305 - self.finish() - - -class WindowsMixin: - def __init__(self, *args, **kwargs): - # type: (List[Any], Dict[Any, Any]) -> None - # The Windows terminal does not support the hide/show cursor ANSI codes - # even with colorama. So we'll ensure that hide_cursor is False on - # Windows. - # This call needs to go before the super() call, so that hide_cursor - # is set in time. The base progress bar class writes the "hide cursor" - # code to the terminal in its init, so if we don't set this soon - # enough, we get a "hide" with no corresponding "show"... - if WINDOWS and self.hide_cursor: # type: ignore - self.hide_cursor = False - - # https://github.com/python/mypy/issues/5887 - super().__init__(*args, **kwargs) # type: ignore - - # Check if we are running on Windows and we have the colorama module, - # if we do then wrap our file with it. - if WINDOWS and colorama: - self.file = colorama.AnsiToWin32(self.file) # type: ignore - # The progress code expects to be able to call self.file.isatty() - # but the colorama.AnsiToWin32() object doesn't have that, so we'll - # add it. - self.file.isatty = lambda: self.file.wrapped.isatty() - # The progress code expects to be able to call self.file.flush() - # but the colorama.AnsiToWin32() object doesn't have that, so we'll - # add it. - self.file.flush = lambda: self.file.wrapped.flush() - - -class BaseDownloadProgressBar(WindowsMixin, InterruptibleMixin, DownloadProgressMixin): - - file = sys.stdout - message = "%(percent)d%%" - suffix = "%(downloaded)s %(download_speed)s %(pretty_eta)s" - - -class DefaultDownloadProgressBar(BaseDownloadProgressBar, _BaseBar): - pass - - -class DownloadSilentBar(BaseDownloadProgressBar, SilentBar): - pass - - -class DownloadBar(BaseDownloadProgressBar, Bar): - pass - - -class DownloadFillingCirclesBar(BaseDownloadProgressBar, FillingCirclesBar): - pass - - -class DownloadBlueEmojiProgressBar(BaseDownloadProgressBar, BlueEmojiBar): - pass - - -class DownloadProgressSpinner( - WindowsMixin, InterruptibleMixin, DownloadProgressMixin, Spinner -): - - file = sys.stdout - suffix = "%(downloaded)s %(download_speed)s" - - def next_phase(self): - # type: () -> str - if not hasattr(self, "_phaser"): - self._phaser = itertools.cycle(self.phases) - return next(self._phaser) - - def update(self): - # type: () -> None - message = self.message % self - phase = self.next_phase() - suffix = self.suffix % self - line = "".join( - [ - message, - " " if message else "", - phase, - " " if suffix else "", - suffix, - ] + total = size + columns = ( + TextColumn("[progress.description]{task.description}"), + BarColumn(), + DownloadColumn(), + TransferSpeedColumn(), + TextColumn("eta"), + TimeRemainingColumn(), ) - self.writeln(line) - + progress = Progress(*columns, refresh_per_second=30) + task_id = progress.add_task(" " * (get_indentation() + 2), total=total) + with progress: + for chunk in iterable: + yield chunk + progress.update(task_id, advance=len(chunk)) -BAR_TYPES = { - "off": (DownloadSilentBar, DownloadSilentBar), - "on": (DefaultDownloadProgressBar, DownloadProgressSpinner), - "ascii": (DownloadBar, DownloadProgressSpinner), - "pretty": 
(DownloadFillingCirclesBar, DownloadProgressSpinner), - "emoji": (DownloadBlueEmojiProgressBar, DownloadProgressSpinner), -} +def get_download_progress_renderer( + *, bar_type: str, size: Optional[int] = None +) -> DownloadProgressRenderer: + """Get an object that can be used to render the download progress. -def DownloadProgressProvider(progress_bar, max=None): # type: ignore - if max is None or max == 0: - return BAR_TYPES[progress_bar][1]().iter + Returns a callable, that takes an iterable to "wrap". + """ + if bar_type == "on": + return functools.partial(_rich_progress_bar, bar_type=bar_type, size=size) else: - return BAR_TYPES[progress_bar][0](max=max).iter + return iter # no-op, when passed an iterator diff --git a/env/Lib/site-packages/pip/_internal/cli/req_command.py b/env/Lib/site-packages/pip/_internal/cli/req_command.py index 7ed623880acc3298de2c804bc09a43b8d8ebdb32..86070f10c14b14dbfac004d11ba3234d36b70276 100644 --- a/env/Lib/site-packages/pip/_internal/cli/req_command.py +++ b/env/Lib/site-packages/pip/_internal/cli/req_command.py @@ -10,7 +10,7 @@ import os import sys from functools import partial from optparse import Values -from typing import Any, List, Optional, Tuple +from typing import TYPE_CHECKING, Any, List, Optional, Tuple from pip._internal.cache import WheelCache from pip._internal.cli import cmdoptions @@ -22,6 +22,7 @@ from pip._internal.index.package_finder import PackageFinder from pip._internal.models.selection_prefs import SelectionPreferences from pip._internal.models.target_python import TargetPython from pip._internal.network.session import PipSession +from pip._internal.operations.build.build_tracker import BuildTracker from pip._internal.operations.prepare import RequirementPreparer from pip._internal.req.constructors import ( install_req_from_editable, @@ -31,7 +32,6 @@ from pip._internal.req.constructors import ( ) from pip._internal.req.req_file import parse_requirements from pip._internal.req.req_install import InstallRequirement -from pip._internal.req.req_tracker import RequirementTracker from pip._internal.resolution.base import BaseResolver from pip._internal.self_outdated_check import pip_self_version_check from pip._internal.utils.temp_dir import ( @@ -41,23 +41,45 @@ from pip._internal.utils.temp_dir import ( ) from pip._internal.utils.virtualenv import running_under_virtualenv +if TYPE_CHECKING: + from ssl import SSLContext + logger = logging.getLogger(__name__) +def _create_truststore_ssl_context() -> Optional["SSLContext"]: + if sys.version_info < (3, 10): + raise CommandError("The truststore feature is only available for Python 3.10+") + + try: + import ssl + except ImportError: + logger.warning("Disabling truststore since ssl support is missing") + return None + + try: + import truststore + except ImportError: + raise CommandError( + "To use the truststore feature, 'truststore' must be installed into " + "pip's current environment." + ) + + return truststore.SSLContext(ssl.PROTOCOL_TLS_CLIENT) + + class SessionCommandMixin(CommandContextMixIn): """ A class mixin for command classes needing _build_session(). 
""" - def __init__(self): - # type: () -> None + def __init__(self) -> None: super().__init__() - self._session = None # Optional[PipSession] + self._session: Optional[PipSession] = None @classmethod - def _get_index_urls(cls, options): - # type: (Values) -> Optional[List[str]] + def _get_index_urls(cls, options: Values) -> Optional[List[str]]: """Return a list of index urls from user-provided options.""" index_urls = [] if not getattr(options, "no_index", False): @@ -70,8 +92,7 @@ class SessionCommandMixin(CommandContextMixIn): # Return None rather than an empty list return index_urls or None - def get_default_session(self, options): - # type: (Values) -> PipSession + def get_default_session(self, options: Values) -> PipSession: """Get a default-managed session.""" if self._session is None: self._session = self.enter_context(self._build_session(options)) @@ -81,16 +102,32 @@ class SessionCommandMixin(CommandContextMixIn): assert self._session is not None return self._session - def _build_session(self, options, retries=None, timeout=None): - # type: (Values, Optional[int], Optional[int]) -> PipSession - assert not options.cache_dir or os.path.isabs(options.cache_dir) + def _build_session( + self, + options: Values, + retries: Optional[int] = None, + timeout: Optional[int] = None, + fallback_to_certifi: bool = False, + ) -> PipSession: + cache_dir = options.cache_dir + assert not cache_dir or os.path.isabs(cache_dir) + + if "truststore" in options.features_enabled: + try: + ssl_context = _create_truststore_ssl_context() + except Exception: + if not fallback_to_certifi: + raise + ssl_context = None + else: + ssl_context = None + session = PipSession( - cache=( - os.path.join(options.cache_dir, "http") if options.cache_dir else None - ), + cache=os.path.join(cache_dir, "http") if cache_dir else None, retries=retries if retries is not None else options.retries, trusted_hosts=options.trusted_hosts, index_urls=self._get_index_urls(options), + ssl_context=ssl_context, ) # Handle custom ca-bundles from the user @@ -114,6 +151,7 @@ class SessionCommandMixin(CommandContextMixIn): # Determine if we can prompt the user for authentication or not session.auth.prompting = not options.no_input + session.auth.keyring_provider = options.keyring_provider return session @@ -126,8 +164,7 @@ class IndexGroupCommand(Command, SessionCommandMixin): This also corresponds to the commands that permit the pip version check. """ - def handle_pip_version_check(self, options): - # type: (Values) -> None + def handle_pip_version_check(self, options: Values) -> None: """ Do the pip version check if not disabled. @@ -141,7 +178,14 @@ class IndexGroupCommand(Command, SessionCommandMixin): # Otherwise, check if we're using the latest version of pip available. session = self._build_session( - options, retries=0, timeout=min(5, options.timeout) + options, + retries=0, + timeout=min(5, options.timeout), + # This is set to ensure the function does not fail when truststore is + # specified in use-feature but cannot be loaded. This usually raises a + # CommandError and shows a nice user-facing error, but this function is not + # called in that try-except block. + fallback_to_certifi=True, ) with session: pip_self_version_check(session, options) @@ -154,8 +198,7 @@ KEEPABLE_TEMPDIR_TYPES = [ ] -def warn_if_run_as_root(): - # type: () -> None +def warn_if_run_as_root() -> None: """Output a warning for sudo users on Unix. In a virtual environment, sudo pip still writes to virtualenv. 
@@ -173,9 +216,10 @@ def warn_if_run_as_root(): # checks: https://mypy.readthedocs.io/en/stable/common_issues.html if sys.platform == "win32" or sys.platform == "cygwin": return - if sys.platform == "darwin" or sys.platform == "linux": - if os.getuid() != 0: - return + + if os.getuid() != 0: + return + logger.warning( "Running pip as the 'root' user can result in broken permissions and " "conflicting behaviour with the system package manager. " @@ -184,19 +228,18 @@ def warn_if_run_as_root(): ) -def with_cleanup(func): - # type: (Any) -> Any +def with_cleanup(func: Any) -> Any: """Decorator for common logic related to managing temporary directories. """ - def configure_tempdir_registry(registry): - # type: (TempDirectoryTypeRegistry) -> None + def configure_tempdir_registry(registry: TempDirectoryTypeRegistry) -> None: for t in KEEPABLE_TEMPDIR_TYPES: registry.set_delete(t, False) - def wrapper(self, options, args): - # type: (RequirementCommand, Values, List[Any]) -> Optional[int] + def wrapper( + self: RequirementCommand, options: Values, args: List[Any] + ) -> Optional[int]: assert self.tempdir_registry is not None if options.no_clean: configure_tempdir_registry(self.tempdir_registry) @@ -214,15 +257,13 @@ def with_cleanup(func): class RequirementCommand(IndexGroupCommand): - def __init__(self, *args, **kw): - # type: (Any, Any) -> None + def __init__(self, *args: Any, **kw: Any) -> None: super().__init__(*args, **kw) self.cmd_opts.add_option(cmdoptions.no_clean()) @staticmethod - def determine_resolver_variant(options): - # type: (Values) -> str + def determine_resolver_variant(options: Values) -> str: """Determines which resolver should be used, based on the given options.""" if "legacy-resolver" in options.deprecated_features_enabled: return "legacy" @@ -232,20 +273,21 @@ class RequirementCommand(IndexGroupCommand): @classmethod def make_requirement_preparer( cls, - temp_build_dir, # type: TempDirectory - options, # type: Values - req_tracker, # type: RequirementTracker - session, # type: PipSession - finder, # type: PackageFinder - use_user_site, # type: bool - download_dir=None, # type: str - ): - # type: (...) -> RequirementPreparer + temp_build_dir: TempDirectory, + options: Values, + build_tracker: BuildTracker, + session: PipSession, + finder: PackageFinder, + use_user_site: bool, + download_dir: Optional[str] = None, + verbosity: int = 0, + ) -> RequirementPreparer: """ Create a RequirementPreparer instance for the given parameters. """ temp_build_dir_path = temp_build_dir.path assert temp_build_dir_path is not None + legacy_resolver = False resolver_variant = cls.determine_resolver_variant(options) if resolver_variant == "2020-resolver": @@ -259,6 +301,7 @@ class RequirementCommand(IndexGroupCommand): "production." 
) else: + legacy_resolver = True lazy_wheel = False if "fast-deps" in options.features_enabled: logger.warning( @@ -270,32 +313,33 @@ class RequirementCommand(IndexGroupCommand): src_dir=options.src_dir, download_dir=download_dir, build_isolation=options.build_isolation, - req_tracker=req_tracker, + check_build_deps=options.check_build_deps, + build_tracker=build_tracker, session=session, progress_bar=options.progress_bar, finder=finder, require_hashes=options.require_hashes, use_user_site=use_user_site, lazy_wheel=lazy_wheel, - in_tree_build="in-tree-build" in options.features_enabled, + verbosity=verbosity, + legacy_resolver=legacy_resolver, ) @classmethod def make_resolver( cls, - preparer, # type: RequirementPreparer - finder, # type: PackageFinder - options, # type: Values - wheel_cache=None, # type: Optional[WheelCache] - use_user_site=False, # type: bool - ignore_installed=True, # type: bool - ignore_requires_python=False, # type: bool - force_reinstall=False, # type: bool - upgrade_strategy="to-satisfy-only", # type: str - use_pep517=None, # type: Optional[bool] - py_version_info=None, # type: Optional[Tuple[int, ...]] - ): - # type: (...) -> BaseResolver + preparer: RequirementPreparer, + finder: PackageFinder, + options: Values, + wheel_cache: Optional[WheelCache] = None, + use_user_site: bool = False, + ignore_installed: bool = True, + ignore_requires_python: bool = False, + force_reinstall: bool = False, + upgrade_strategy: str = "to-satisfy-only", + use_pep517: Optional[bool] = None, + py_version_info: Optional[Tuple[int, ...]] = None, + ) -> BaseResolver: """ Create a Resolver instance for the given parameters. """ @@ -342,16 +386,15 @@ class RequirementCommand(IndexGroupCommand): def get_requirements( self, - args, # type: List[str] - options, # type: Values - finder, # type: PackageFinder - session, # type: PipSession - ): - # type: (...) -> List[InstallRequirement] + args: List[str], + options: Values, + finder: PackageFinder, + session: PipSession, + ) -> List[InstallRequirement]: """ Parse command-line arguments into the corresponding requirements. """ - requirements = [] # type: List[InstallRequirement] + requirements: List[InstallRequirement] = [] for filename in options.constraints: for parsed_req in parse_requirements( filename, @@ -370,10 +413,11 @@ class RequirementCommand(IndexGroupCommand): for req in args: req_to_add = install_req_from_line( req, - None, + comes_from=None, isolated=options.isolated_mode, use_pep517=options.use_pep517, user_supplied=True, + config_settings=getattr(options, "config_settings", None), ) requirements.append(req_to_add) @@ -383,6 +427,7 @@ class RequirementCommand(IndexGroupCommand): user_supplied=True, isolated=options.isolated_mode, use_pep517=options.use_pep517, + config_settings=getattr(options, "config_settings", None), ) requirements.append(req_to_add) @@ -396,6 +441,9 @@ class RequirementCommand(IndexGroupCommand): isolated=options.isolated_mode, use_pep517=options.use_pep517, user_supplied=True, + config_settings=parsed_req.options.get("config_settings") + if parsed_req.options + else None, ) requirements.append(req_to_add) @@ -421,8 +469,7 @@ class RequirementCommand(IndexGroupCommand): return requirements @staticmethod - def trace_basic_info(finder): - # type: (PackageFinder) -> None + def trace_basic_info(finder: PackageFinder) -> None: """ Trace basic information about the provided objects. 
""" @@ -434,12 +481,11 @@ class RequirementCommand(IndexGroupCommand): def _build_package_finder( self, - options, # type: Values - session, # type: PipSession - target_python=None, # type: Optional[TargetPython] - ignore_requires_python=None, # type: Optional[bool] - ): - # type: (...) -> PackageFinder + options: Values, + session: PipSession, + target_python: Optional[TargetPython] = None, + ignore_requires_python: Optional[bool] = None, + ) -> PackageFinder: """ Create a package finder appropriate to this requirement command. diff --git a/env/Lib/site-packages/pip/_internal/cli/spinners.py b/env/Lib/site-packages/pip/_internal/cli/spinners.py index 08e156617c4c112b1fd13583b976377c46e37d69..cf2b976f377c2656afb3d84add8d30b0fc280c03 100644 --- a/env/Lib/site-packages/pip/_internal/cli/spinners.py +++ b/env/Lib/site-packages/pip/_internal/cli/spinners.py @@ -3,9 +3,7 @@ import itertools import logging import sys import time -from typing import IO, Iterator - -from pip._vendor.progress import HIDE_CURSOR, SHOW_CURSOR +from typing import IO, Generator, Optional from pip._internal.utils.compat import WINDOWS from pip._internal.utils.logging import get_indentation @@ -14,25 +12,22 @@ logger = logging.getLogger(__name__) class SpinnerInterface: - def spin(self): - # type: () -> None + def spin(self) -> None: raise NotImplementedError() - def finish(self, final_status): - # type: (str) -> None + def finish(self, final_status: str) -> None: raise NotImplementedError() class InteractiveSpinner(SpinnerInterface): def __init__( self, - message, - file=None, - spin_chars="-\\|/", + message: str, + file: Optional[IO[str]] = None, + spin_chars: str = "-\\|/", # Empirically, 8 updates/second looks nice - min_update_interval_seconds=0.125, + min_update_interval_seconds: float = 0.125, ): - # type: (str, IO[str], str, float) -> None self._message = message if file is None: file = sys.stdout @@ -45,8 +40,7 @@ class InteractiveSpinner(SpinnerInterface): self._file.write(" " * get_indentation() + self._message + " ... ") self._width = 0 - def _write(self, status): - # type: (str) -> None + def _write(self, status: str) -> None: assert not self._finished # Erase what we wrote before by backspacing to the beginning, writing # spaces to overwrite the old text, and then backspacing again @@ -58,16 +52,14 @@ class InteractiveSpinner(SpinnerInterface): self._file.flush() self._rate_limiter.reset() - def spin(self): - # type: () -> None + def spin(self) -> None: if self._finished: return if not self._rate_limiter.ready(): return self._write(next(self._spin_cycle)) - def finish(self, final_status): - # type: (str) -> None + def finish(self, final_status: str) -> None: if self._finished: return self._write(final_status) @@ -81,29 +73,25 @@ class InteractiveSpinner(SpinnerInterface): # act as a keep-alive for systems like Travis-CI that take lack-of-output as # an indication that a task has frozen. 
class NonInteractiveSpinner(SpinnerInterface): - def __init__(self, message, min_update_interval_seconds=60): - # type: (str, float) -> None + def __init__(self, message: str, min_update_interval_seconds: float = 60.0) -> None: self._message = message self._finished = False self._rate_limiter = RateLimiter(min_update_interval_seconds) self._update("started") - def _update(self, status): - # type: (str) -> None + def _update(self, status: str) -> None: assert not self._finished self._rate_limiter.reset() logger.info("%s: %s", self._message, status) - def spin(self): - # type: () -> None + def spin(self) -> None: if self._finished: return if not self._rate_limiter.ready(): return self._update("still running...") - def finish(self, final_status): - # type: (str) -> None + def finish(self, final_status: str) -> None: if self._finished: return self._update(f"finished with status '{final_status}'") @@ -111,32 +99,28 @@ class NonInteractiveSpinner(SpinnerInterface): class RateLimiter: - def __init__(self, min_update_interval_seconds): - # type: (float) -> None + def __init__(self, min_update_interval_seconds: float) -> None: self._min_update_interval_seconds = min_update_interval_seconds - self._last_update = 0 # type: float + self._last_update: float = 0 - def ready(self): - # type: () -> bool + def ready(self) -> bool: now = time.time() delta = now - self._last_update return delta >= self._min_update_interval_seconds - def reset(self): - # type: () -> None + def reset(self) -> None: self._last_update = time.time() @contextlib.contextmanager -def open_spinner(message): - # type: (str) -> Iterator[SpinnerInterface] +def open_spinner(message: str) -> Generator[SpinnerInterface, None, None]: # Interactive spinner goes directly to sys.stdout rather than being routed # through the logging system, but it acts like it has level INFO, # i.e. it's only displayed if we're at level INFO or better. # Non-interactive spinner goes through the logging system, so it is always # in sync with logging configuration. if sys.stdout.isatty() and logger.getEffectiveLevel() <= logging.INFO: - spinner = InteractiveSpinner(message) # type: SpinnerInterface + spinner: SpinnerInterface = InteractiveSpinner(message) else: spinner = NonInteractiveSpinner(message) try: @@ -152,9 +136,12 @@ def open_spinner(message): spinner.finish("done") +HIDE_CURSOR = "\x1b[?25l" +SHOW_CURSOR = "\x1b[?25h" + + @contextlib.contextmanager -def hidden_cursor(file): - # type: (IO[str]) -> Iterator[None] +def hidden_cursor(file: IO[str]) -> Generator[None, None, None]: # The Windows terminal does not support the hide/show cursor ANSI codes, # even via colorama. So don't even try. if WINDOWS: diff --git a/env/Lib/site-packages/pip/_internal/commands/__init__.py b/env/Lib/site-packages/pip/_internal/commands/__init__.py index 31c985fdca51a399bc80d345f0f2401ec706426f..858a41014169b8f0eb1b905fa3bb69c753a1bda5 100644 --- a/env/Lib/site-packages/pip/_internal/commands/__init__.py +++ b/env/Lib/site-packages/pip/_internal/commands/__init__.py @@ -3,87 +3,110 @@ Package containing all pip commands """ import importlib -from collections import OrderedDict, namedtuple -from typing import Any, Optional +from collections import namedtuple +from typing import Any, Dict, Optional from pip._internal.cli.base_command import Command -CommandInfo = namedtuple('CommandInfo', 'module_path, class_name, summary') +CommandInfo = namedtuple("CommandInfo", "module_path, class_name, summary") -# The ordering matters for help display. 
-# Also, even though the module path starts with the same -# "pip._internal.commands" prefix in each case, we include the full path -# because it makes testing easier (specifically when modifying commands_dict -# in test setup / teardown by adding info for a FakeCommand class defined -# in a test-related module). -# Finally, we need to pass an iterable of pairs here rather than a dict -# so that the ordering won't be lost when using Python 2.7. -commands_dict = OrderedDict([ - ('install', CommandInfo( - 'pip._internal.commands.install', 'InstallCommand', - 'Install packages.', - )), - ('download', CommandInfo( - 'pip._internal.commands.download', 'DownloadCommand', - 'Download packages.', - )), - ('uninstall', CommandInfo( - 'pip._internal.commands.uninstall', 'UninstallCommand', - 'Uninstall packages.', - )), - ('freeze', CommandInfo( - 'pip._internal.commands.freeze', 'FreezeCommand', - 'Output installed packages in requirements format.', - )), - ('list', CommandInfo( - 'pip._internal.commands.list', 'ListCommand', - 'List installed packages.', - )), - ('show', CommandInfo( - 'pip._internal.commands.show', 'ShowCommand', - 'Show information about installed packages.', - )), - ('check', CommandInfo( - 'pip._internal.commands.check', 'CheckCommand', - 'Verify installed packages have compatible dependencies.', - )), - ('config', CommandInfo( - 'pip._internal.commands.configuration', 'ConfigurationCommand', - 'Manage local and global configuration.', - )), - ('search', CommandInfo( - 'pip._internal.commands.search', 'SearchCommand', - 'Search PyPI for packages.', - )), - ('cache', CommandInfo( - 'pip._internal.commands.cache', 'CacheCommand', +# This dictionary does a bunch of heavy lifting for help output: +# - Enables avoiding additional (costly) imports for presenting `--help`. +# - The ordering matters for help display. +# +# Even though the module path starts with the same "pip._internal.commands" +# prefix, the full path makes testing easier (specifically when modifying +# `commands_dict` in test setup / teardown). 
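A minimal sketch of the same lazy-registry pattern, to make the deferred imports concrete (`examples.greet` and `GreetCommand` are hypothetical names, not part of pip):

```python
import importlib
from typing import Any, NamedTuple


class CommandInfo(NamedTuple):
    module_path: str
    class_name: str
    summary: str


# Only strings live in the registry, so rendering --help imports nothing extra.
registry = {
    "greet": CommandInfo("examples.greet", "GreetCommand", "Say hello."),
}


def create(name: str, **kwargs: Any) -> Any:
    info = registry[name]
    module = importlib.import_module(info.module_path)  # deferred until now
    return getattr(module, info.class_name)(**kwargs)
```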
+commands_dict: Dict[str, CommandInfo] = { + "install": CommandInfo( + "pip._internal.commands.install", + "InstallCommand", + "Install packages.", + ), + "download": CommandInfo( + "pip._internal.commands.download", + "DownloadCommand", + "Download packages.", + ), + "uninstall": CommandInfo( + "pip._internal.commands.uninstall", + "UninstallCommand", + "Uninstall packages.", + ), + "freeze": CommandInfo( + "pip._internal.commands.freeze", + "FreezeCommand", + "Output installed packages in requirements format.", + ), + "inspect": CommandInfo( + "pip._internal.commands.inspect", + "InspectCommand", + "Inspect the python environment.", + ), + "list": CommandInfo( + "pip._internal.commands.list", + "ListCommand", + "List installed packages.", + ), + "show": CommandInfo( + "pip._internal.commands.show", + "ShowCommand", + "Show information about installed packages.", + ), + "check": CommandInfo( + "pip._internal.commands.check", + "CheckCommand", + "Verify installed packages have compatible dependencies.", + ), + "config": CommandInfo( + "pip._internal.commands.configuration", + "ConfigurationCommand", + "Manage local and global configuration.", + ), + "search": CommandInfo( + "pip._internal.commands.search", + "SearchCommand", + "Search PyPI for packages.", + ), + "cache": CommandInfo( + "pip._internal.commands.cache", + "CacheCommand", "Inspect and manage pip's wheel cache.", - )), - ('wheel', CommandInfo( - 'pip._internal.commands.wheel', 'WheelCommand', - 'Build wheels from your requirements.', - )), - ('hash', CommandInfo( - 'pip._internal.commands.hash', 'HashCommand', - 'Compute hashes of package archives.', - )), - ('completion', CommandInfo( - 'pip._internal.commands.completion', 'CompletionCommand', - 'A helper command used for command completion.', - )), - ('debug', CommandInfo( - 'pip._internal.commands.debug', 'DebugCommand', - 'Show information useful for debugging.', - )), - ('help', CommandInfo( - 'pip._internal.commands.help', 'HelpCommand', - 'Show help for commands.', - )), -]) # type: OrderedDict[str, CommandInfo] + ), + "index": CommandInfo( + "pip._internal.commands.index", + "IndexCommand", + "Inspect information available from package indexes.", + ), + "wheel": CommandInfo( + "pip._internal.commands.wheel", + "WheelCommand", + "Build wheels from your requirements.", + ), + "hash": CommandInfo( + "pip._internal.commands.hash", + "HashCommand", + "Compute hashes of package archives.", + ), + "completion": CommandInfo( + "pip._internal.commands.completion", + "CompletionCommand", + "A helper command used for command completion.", + ), + "debug": CommandInfo( + "pip._internal.commands.debug", + "DebugCommand", + "Show information useful for debugging.", + ), + "help": CommandInfo( + "pip._internal.commands.help", + "HelpCommand", + "Show help for commands.", + ), +} -def create_command(name, **kwargs): - # type: (str, **Any) -> Command +def create_command(name: str, **kwargs: Any) -> Command: """ Create an instance of the Command class with the given name. 
""" @@ -95,8 +118,7 @@ def create_command(name, **kwargs): return command -def get_similar_commands(name): - # type: (str) -> Optional[str] +def get_similar_commands(name: str) -> Optional[str]: """Command name auto-correct.""" from difflib import get_close_matches diff --git a/env/Lib/site-packages/pip/_internal/commands/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/commands/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 04f7572180d336fd0d41a6dfd90ae1a40fc5da45..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/commands/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/commands/__pycache__/cache.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/commands/__pycache__/cache.cpython-39.pyc deleted file mode 100644 index ce122136e9571e86ba5f72497a1a42bcb928e585..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/commands/__pycache__/cache.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/commands/__pycache__/check.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/commands/__pycache__/check.cpython-39.pyc deleted file mode 100644 index d1b8d60541ad768db4004959a8f6ad0911cd8ab3..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/commands/__pycache__/check.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/commands/__pycache__/completion.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/commands/__pycache__/completion.cpython-39.pyc deleted file mode 100644 index ef90587f3b654e54f6b96290027aa7e696bf94f3..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/commands/__pycache__/completion.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/commands/__pycache__/configuration.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/commands/__pycache__/configuration.cpython-39.pyc deleted file mode 100644 index 7e2472bd8f2106dea5261c6baae3a06dbd02249a..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/commands/__pycache__/configuration.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/commands/__pycache__/debug.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/commands/__pycache__/debug.cpython-39.pyc deleted file mode 100644 index 2e13ff15c5b4c7755b5d713a303bfd2cfbd7c682..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/commands/__pycache__/debug.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/commands/__pycache__/download.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/commands/__pycache__/download.cpython-39.pyc deleted file mode 100644 index 9a6089bad2293ba88c4b265ae8b2786b279e3be6..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/commands/__pycache__/download.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/commands/__pycache__/freeze.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/commands/__pycache__/freeze.cpython-39.pyc deleted file mode 100644 index fcfde785a5b7a033456abe0d7a65cdb9dca147f0..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/commands/__pycache__/freeze.cpython-39.pyc and /dev/null differ diff --git 
a/env/Lib/site-packages/pip/_internal/commands/__pycache__/hash.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/commands/__pycache__/hash.cpython-39.pyc deleted file mode 100644 index 86395283513416bb9f85def0919200009424cc90..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/commands/__pycache__/hash.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/commands/__pycache__/help.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/commands/__pycache__/help.cpython-39.pyc deleted file mode 100644 index 8767c57e9cc95e4746bf30f1e65951f1184267ea..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/commands/__pycache__/help.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/commands/__pycache__/install.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/commands/__pycache__/install.cpython-39.pyc deleted file mode 100644 index d2303ddb9525c8b56dc3349e16f9af37930246d2..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/commands/__pycache__/install.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/commands/__pycache__/list.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/commands/__pycache__/list.cpython-39.pyc deleted file mode 100644 index e8376fc691603f5cd9336d1b06101f7de917a3cc..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/commands/__pycache__/list.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/commands/__pycache__/search.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/commands/__pycache__/search.cpython-39.pyc deleted file mode 100644 index 8f67ca4d02b7926ac728fc80dbeb4b9cbc175acd..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/commands/__pycache__/search.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/commands/__pycache__/show.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/commands/__pycache__/show.cpython-39.pyc deleted file mode 100644 index 575e0e6764f229fdd77450afb66c5e5dbf14a090..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/commands/__pycache__/show.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/commands/__pycache__/uninstall.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/commands/__pycache__/uninstall.cpython-39.pyc deleted file mode 100644 index ac30940333179e957cd821fb0e2ce525a52ec5f7..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/commands/__pycache__/uninstall.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/commands/__pycache__/wheel.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/commands/__pycache__/wheel.cpython-39.pyc deleted file mode 100644 index de43dd9d54fcdc00b2f5635f96d32e63919dba27..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/commands/__pycache__/wheel.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/commands/cache.py b/env/Lib/site-packages/pip/_internal/commands/cache.py index 5155a5053e742199f0c0495da44eee1541ac8f6b..e96d2b4924c468c666f3ad6dab902f217ee43c39 100644 --- a/env/Lib/site-packages/pip/_internal/commands/cache.py +++ b/env/Lib/site-packages/pip/_internal/commands/cache.py @@ -1,4 +1,3 @@ -import 
logging import os import textwrap from optparse import Values @@ -8,8 +7,9 @@ import pip._internal.utils.filesystem as filesystem from pip._internal.cli.base_command import Command from pip._internal.cli.status_codes import ERROR, SUCCESS from pip._internal.exceptions import CommandError, PipError +from pip._internal.utils.logging import getLogger -logger = logging.getLogger(__name__) +logger = getLogger(__name__) class CacheCommand(Command): @@ -36,22 +36,19 @@ class CacheCommand(Command): %prog purge """ - def add_options(self): - # type: () -> None - + def add_options(self) -> None: self.cmd_opts.add_option( - '--format', - action='store', - dest='list_format', + "--format", + action="store", + dest="list_format", default="human", - choices=('human', 'abspath'), - help="Select the output format among: human (default) or abspath" + choices=("human", "abspath"), + help="Select the output format among: human (default) or abspath", ) self.parser.insert_option_group(0, self.cmd_opts) - def run(self, options, args): - # type: (Values, List[Any]) -> int + def run(self, options: Values, args: List[str]) -> int: handlers = { "dir": self.get_cache_dir, "info": self.get_cache_info, @@ -61,8 +58,7 @@ class CacheCommand(Command): } if not options.cache_dir: - logger.error("pip cache commands can not " - "function since cache is disabled.") + logger.error("pip cache commands can not function since cache is disabled.") return ERROR # Determine action @@ -84,78 +80,77 @@ class CacheCommand(Command): return SUCCESS - def get_cache_dir(self, options, args): - # type: (Values, List[Any]) -> None + def get_cache_dir(self, options: Values, args: List[Any]) -> None: if args: - raise CommandError('Too many arguments') + raise CommandError("Too many arguments") logger.info(options.cache_dir) - def get_cache_info(self, options, args): - # type: (Values, List[Any]) -> None + def get_cache_info(self, options: Values, args: List[Any]) -> None: if args: - raise CommandError('Too many arguments') + raise CommandError("Too many arguments") num_http_files = len(self._find_http_files(options)) - num_packages = len(self._find_wheels(options, '*')) + num_packages = len(self._find_wheels(options, "*")) - http_cache_location = self._cache_dir(options, 'http') - wheels_cache_location = self._cache_dir(options, 'wheels') + http_cache_location = self._cache_dir(options, "http") + wheels_cache_location = self._cache_dir(options, "wheels") http_cache_size = filesystem.format_directory_size(http_cache_location) - wheels_cache_size = filesystem.format_directory_size( - wheels_cache_location + wheels_cache_size = filesystem.format_directory_size(wheels_cache_location) + + message = ( + textwrap.dedent( + """ + Package index page cache location: {http_cache_location} + Package index page cache size: {http_cache_size} + Number of HTTP files: {num_http_files} + Locally built wheels location: {wheels_cache_location} + Locally built wheels size: {wheels_cache_size} + Number of locally built wheels: {package_count} + """ + ) + .format( + http_cache_location=http_cache_location, + http_cache_size=http_cache_size, + num_http_files=num_http_files, + wheels_cache_location=wheels_cache_location, + package_count=num_packages, + wheels_cache_size=wheels_cache_size, + ) + .strip() ) - message = textwrap.dedent(""" - Package index page cache location: {http_cache_location} - Package index page cache size: {http_cache_size} - Number of HTTP files: {num_http_files} - Wheels location: {wheels_cache_location} - Wheels size: {wheels_cache_size} - 
Number of wheels: {package_count} - """).format( - http_cache_location=http_cache_location, - http_cache_size=http_cache_size, - num_http_files=num_http_files, - wheels_cache_location=wheels_cache_location, - package_count=num_packages, - wheels_cache_size=wheels_cache_size, - ).strip() - logger.info(message) - def list_cache_items(self, options, args): - # type: (Values, List[Any]) -> None + def list_cache_items(self, options: Values, args: List[Any]) -> None: if len(args) > 1: - raise CommandError('Too many arguments') + raise CommandError("Too many arguments") if args: pattern = args[0] else: - pattern = '*' + pattern = "*" files = self._find_wheels(options, pattern) - if options.list_format == 'human': + if options.list_format == "human": self.format_for_human(files) else: self.format_for_abspath(files) - def format_for_human(self, files): - # type: (List[str]) -> None + def format_for_human(self, files: List[str]) -> None: if not files: - logger.info('Nothing cached.') + logger.info("No locally built wheels cached.") return results = [] for filename in files: wheel = os.path.basename(filename) size = filesystem.format_file_size(filename) - results.append(f' - {wheel} ({size})') - logger.info('Cache contents:\n') - logger.info('\n'.join(sorted(results))) + results.append(f" - {wheel} ({size})") + logger.info("Cache contents:\n") + logger.info("\n".join(sorted(results))) - def format_for_abspath(self, files): - # type: (List[str]) -> None + def format_for_abspath(self, files: List[str]) -> None: if not files: return @@ -163,49 +158,48 @@ class CacheCommand(Command): for filename in files: results.append(filename) - logger.info('\n'.join(sorted(results))) + logger.info("\n".join(sorted(results))) - def remove_cache_items(self, options, args): - # type: (Values, List[Any]) -> None + def remove_cache_items(self, options: Values, args: List[Any]) -> None: if len(args) > 1: - raise CommandError('Too many arguments') + raise CommandError("Too many arguments") if not args: - raise CommandError('Please provide a pattern') + raise CommandError("Please provide a pattern") files = self._find_wheels(options, args[0]) - # Only fetch http files if no specific pattern given - if args[0] == '*': + no_matching_msg = "No matching packages" + if args[0] == "*": + # Only fetch http files if no specific pattern given files += self._find_http_files(options) + else: + # Add the pattern to the log message + no_matching_msg += ' for pattern "{}"'.format(args[0]) if not files: - raise CommandError('No matching packages') + logger.warning(no_matching_msg) for filename in files: os.unlink(filename) - logger.debug('Removed %s', filename) - logger.info('Files removed: %s', len(files)) + logger.verbose("Removed %s", filename) + logger.info("Files removed: %s", len(files)) - def purge_cache(self, options, args): - # type: (Values, List[Any]) -> None + def purge_cache(self, options: Values, args: List[Any]) -> None: if args: - raise CommandError('Too many arguments') + raise CommandError("Too many arguments") - return self.remove_cache_items(options, ['*']) + return self.remove_cache_items(options, ["*"]) - def _cache_dir(self, options, subdir): - # type: (Values, str) -> str + def _cache_dir(self, options: Values, subdir: str) -> str: return os.path.join(options.cache_dir, subdir) - def _find_http_files(self, options): - # type: (Values) -> List[str] - http_dir = self._cache_dir(options, 'http') - return filesystem.find_files(http_dir, '*') + def _find_http_files(self, options: Values) -> List[str]: + http_dir = 
self._cache_dir(options, "http") + return filesystem.find_files(http_dir, "*") - def _find_wheels(self, options, pattern): - # type: (Values, str) -> List[str] - wheel_dir = self._cache_dir(options, 'wheels') + def _find_wheels(self, options: Values, pattern: str) -> List[str]: + wheel_dir = self._cache_dir(options, "wheels") # The wheel filename format, as specified in PEP 427, is: # {distribution}-{version}(-{build})?-{python}-{abi}-{platform}.whl diff --git a/env/Lib/site-packages/pip/_internal/commands/check.py b/env/Lib/site-packages/pip/_internal/commands/check.py index 70aa5af2249b9435fde3d10b0119d15749ffdc78..5efd0a3416041e3afdf32a2d346db01d99e8f7d9 100644 --- a/env/Lib/site-packages/pip/_internal/commands/check.py +++ b/env/Lib/site-packages/pip/_internal/commands/check.py @@ -1,12 +1,13 @@ import logging from optparse import Values -from typing import Any, List +from typing import List from pip._internal.cli.base_command import Command from pip._internal.cli.status_codes import ERROR, SUCCESS from pip._internal.operations.check import ( check_package_set, create_package_set_from_installed, + warn_legacy_versions_and_specifiers, ) from pip._internal.utils.misc import write_output @@ -19,10 +20,9 @@ class CheckCommand(Command): usage = """ %prog [options]""" - def run(self, options, args): - # type: (Values, List[Any]) -> int - + def run(self, options: Values, args: List[str]) -> int: package_set, parsing_probs = create_package_set_from_installed() + warn_legacy_versions_and_specifiers(package_set) missing, conflicting = check_package_set(package_set) for project_name in missing: @@ -30,7 +30,9 @@ class CheckCommand(Command): for dependency in missing[project_name]: write_output( "%s %s requires %s, which is not installed.", - project_name, version, dependency[0], + project_name, + version, + dependency[0], ) for project_name in conflicting: @@ -38,7 +40,11 @@ class CheckCommand(Command): for dep_name, dep_version, req in conflicting[project_name]: write_output( "%s %s has requirement %s, but you have %s %s.", - project_name, version, req, dep_name, dep_version, + project_name, + version, + req, + dep_name, + dep_version, ) if missing or conflicting or parsing_probs: diff --git a/env/Lib/site-packages/pip/_internal/commands/completion.py b/env/Lib/site-packages/pip/_internal/commands/completion.py index 92cb7882770fcb835adda2fa75fdc6db051c179e..30233fc7ad2c07c42e7c2d384312f1f4373155f6 100644 --- a/env/Lib/site-packages/pip/_internal/commands/completion.py +++ b/env/Lib/site-packages/pip/_internal/commands/completion.py @@ -12,7 +12,7 @@ BASE_COMPLETION = """ """ COMPLETION_SCRIPTS = { - 'bash': """ + "bash": """ _pip_completion() {{ COMPREPLY=( $( COMP_WORDS="${{COMP_WORDS[*]}}" \\ @@ -21,18 +21,13 @@ COMPLETION_SCRIPTS = { }} complete -o default -F _pip_completion {prog} """, - 'zsh': """ - function _pip_completion {{ - local words cword - read -Ac words - read -cn cword - reply=( $( COMP_WORDS="$words[*]" \\ - COMP_CWORD=$(( cword-1 )) \\ - PIP_AUTO_COMPLETE=1 $words[1] 2>/dev/null )) - }} - compctl -K _pip_completion {prog} + "zsh": """ + #compdef -P pip[0-9.]# + compadd $( COMP_WORDS="$words[*]" \\ + COMP_CWORD=$((CURRENT-1)) \\ + PIP_AUTO_COMPLETE=1 $words[1] 2>/dev/null ) """, - 'fish': """ + "fish": """ function __fish_complete_pip set -lx COMP_WORDS (commandline -o) "" set -lx COMP_CWORD ( \\ @@ -43,6 +38,28 @@ COMPLETION_SCRIPTS = { end complete -fa "(__fish_complete_pip)" -c {prog} """, + "powershell": """ + if ((Test-Path Function:\\TabExpansion) -and -not ` + (Test-Path 
Function:\\_pip_completeBackup)) {{ + Rename-Item Function:\\TabExpansion _pip_completeBackup + }} + function TabExpansion($line, $lastWord) {{ + $lastBlock = [regex]::Split($line, '[|;]')[-1].TrimStart() + if ($lastBlock.StartsWith("{prog} ")) {{ + $Env:COMP_WORDS=$lastBlock + $Env:COMP_CWORD=$lastBlock.Split().Length - 1 + $Env:PIP_AUTO_COMPLETE=1 + (& {prog}).Split() + Remove-Item Env:COMP_WORDS + Remove-Item Env:COMP_CWORD + Remove-Item Env:PIP_AUTO_COMPLETE + }} + elseif (Test-Path Function:\\_pip_completeBackup) {{ + # Fall back on existing tab expansion + _pip_completeBackup $line $lastWord + }} + }} + """, } @@ -51,43 +68,54 @@ class CompletionCommand(Command): ignore_require_venv = True - def add_options(self): - # type: () -> None + def add_options(self) -> None: + self.cmd_opts.add_option( + "--bash", + "-b", + action="store_const", + const="bash", + dest="shell", + help="Emit completion code for bash", + ) self.cmd_opts.add_option( - '--bash', '-b', - action='store_const', - const='bash', - dest='shell', - help='Emit completion code for bash') + "--zsh", + "-z", + action="store_const", + const="zsh", + dest="shell", + help="Emit completion code for zsh", + ) self.cmd_opts.add_option( - '--zsh', '-z', - action='store_const', - const='zsh', - dest='shell', - help='Emit completion code for zsh') + "--fish", + "-f", + action="store_const", + const="fish", + dest="shell", + help="Emit completion code for fish", + ) self.cmd_opts.add_option( - '--fish', '-f', - action='store_const', - const='fish', - dest='shell', - help='Emit completion code for fish') + "--powershell", + "-p", + action="store_const", + const="powershell", + dest="shell", + help="Emit completion code for powershell", + ) self.parser.insert_option_group(0, self.cmd_opts) - def run(self, options, args): - # type: (Values, List[str]) -> int + def run(self, options: Values, args: List[str]) -> int: """Prints the completion code of the given shell""" shells = COMPLETION_SCRIPTS.keys() - shell_options = ['--' + shell for shell in sorted(shells)] + shell_options = ["--" + shell for shell in sorted(shells)] if options.shell in shells: script = textwrap.dedent( - COMPLETION_SCRIPTS.get(options.shell, '').format( - prog=get_prog()) + COMPLETION_SCRIPTS.get(options.shell, "").format(prog=get_prog()) ) print(BASE_COMPLETION.format(script=script, shell=options.shell)) return SUCCESS else: sys.stderr.write( - 'ERROR: You must pass {}\n' .format(' or '.join(shell_options)) + "ERROR: You must pass {}\n".format(" or ".join(shell_options)) ) return SUCCESS diff --git a/env/Lib/site-packages/pip/_internal/commands/configuration.py b/env/Lib/site-packages/pip/_internal/commands/configuration.py index e13f7142ca31107e96c3b9957637d44462e95306..84b134e490b081d661daf69f98e0b9b1fdddd36f 100644 --- a/env/Lib/site-packages/pip/_internal/commands/configuration.py +++ b/env/Lib/site-packages/pip/_internal/commands/configuration.py @@ -27,14 +27,20 @@ class ConfigurationCommand(Command): - list: List the active configuration (or from the file specified) - edit: Edit the configuration file in an editor - - get: Get the value associated with name - - set: Set the name=value - - unset: Unset the value associated with name + - get: Get the value associated with command.option + - set: Set the command.option=value + - unset: Unset the value associated with command.option - debug: List the configuration files and values defined under them + Configuration keys should be dot separated command and option name, + with the special prefix "global" affecting 
any command. For example, + "pip config set global.index-url https://example.org/" would configure + the index url for all commands, but "pip config set download.timeout 10" + would configure a 10 second timeout only for "pip download" commands. + If none of --user, --global and --site are passed, a virtual environment configuration file is used if one is active and the file - exists. Otherwise, all modifications happen on the to the user file by + exists. Otherwise, all modifications happen to the user file by default. """ @@ -43,53 +49,51 @@ class ConfigurationCommand(Command): %prog [<file-option>] list %prog [<file-option>] [--editor <editor-path>] edit - %prog [<file-option>] get name - %prog [<file-option>] set name value - %prog [<file-option>] unset name + %prog [<file-option>] get command.option + %prog [<file-option>] set command.option value + %prog [<file-option>] unset command.option %prog [<file-option>] debug """ - def add_options(self): - # type: () -> None + def add_options(self) -> None: self.cmd_opts.add_option( - '--editor', - dest='editor', - action='store', + "--editor", + dest="editor", + action="store", default=None, help=( - 'Editor to use to edit the file. Uses VISUAL or EDITOR ' - 'environment variables if not provided.' - ) + "Editor to use to edit the file. Uses VISUAL or EDITOR " + "environment variables if not provided." + ), ) self.cmd_opts.add_option( - '--global', - dest='global_file', - action='store_true', + "--global", + dest="global_file", + action="store_true", default=False, - help='Use the system-wide configuration file only' + help="Use the system-wide configuration file only", ) self.cmd_opts.add_option( - '--user', - dest='user_file', - action='store_true', + "--user", + dest="user_file", + action="store_true", default=False, - help='Use the user configuration file only' + help="Use the user configuration file only", ) self.cmd_opts.add_option( - '--site', - dest='site_file', - action='store_true', + "--site", + dest="site_file", + action="store_true", default=False, - help='Use the current environment configuration file only' + help="Use the current environment configuration file only", ) self.parser.insert_option_group(0, self.cmd_opts) - def run(self, options, args): - # type: (Values, List[str]) -> int + def run(self, options: Values, args: List[str]) -> int: handlers = { "list": self.list_values, "edit": self.open_in_editor, @@ -134,13 +138,16 @@ class ConfigurationCommand(Command): return SUCCESS - def _determine_file(self, options, need_value): - # type: (Values, bool) -> Optional[Kind] - file_options = [key for key, value in ( - (kinds.USER, options.user_file), - (kinds.GLOBAL, options.global_file), - (kinds.SITE, options.site_file), - ) if value] + def _determine_file(self, options: Values, need_value: bool) -> Optional[Kind]: + file_options = [ + key + for key, value in ( + (kinds.USER, options.user_file), + (kinds.GLOBAL, options.global_file), + (kinds.SITE, options.site_file), + ) + if value + ] if not file_options: if not need_value: @@ -161,36 +168,31 @@ class ConfigurationCommand(Command): "(--user, --site, --global) to perform." 
) - def list_values(self, options, args): - # type: (Values, List[str]) -> None + def list_values(self, options: Values, args: List[str]) -> None: self._get_n_args(args, "list", n=0) for key, value in sorted(self.configuration.items()): write_output("%s=%r", key, value) - def get_name(self, options, args): - # type: (Values, List[str]) -> None + def get_name(self, options: Values, args: List[str]) -> None: key = self._get_n_args(args, "get [name]", n=1) value = self.configuration.get_value(key) write_output("%s", value) - def set_name_value(self, options, args): - # type: (Values, List[str]) -> None + def set_name_value(self, options: Values, args: List[str]) -> None: key, value = self._get_n_args(args, "set [name] [value]", n=2) self.configuration.set_value(key, value) self._save_configuration() - def unset_name(self, options, args): - # type: (Values, List[str]) -> None + def unset_name(self, options: Values, args: List[str]) -> None: key = self._get_n_args(args, "unset [name]", n=1) self.configuration.unset_value(key) self._save_configuration() - def list_config_values(self, options, args): - # type: (Values, List[str]) -> None + def list_config_values(self, options: Values, args: List[str]) -> None: """List config key-value pairs across different config files""" self._get_n_args(args, "debug", n=0) @@ -202,51 +204,53 @@ class ConfigurationCommand(Command): for fname in files: with indent_log(): file_exists = os.path.exists(fname) - write_output("%s, exists: %r", - fname, file_exists) + write_output("%s, exists: %r", fname, file_exists) if file_exists: self.print_config_file_values(variant) - def print_config_file_values(self, variant): - # type: (Kind) -> None + def print_config_file_values(self, variant: Kind) -> None: """Get key-value pairs from the file of a variant""" - for name, value in self.configuration.\ - get_values_in_config(variant).items(): + for name, value in self.configuration.get_values_in_config(variant).items(): with indent_log(): write_output("%s: %s", name, value) - def print_env_var_values(self): - # type: () -> None + def print_env_var_values(self) -> None: """Get key-values pairs present as environment variables""" - write_output("%s:", 'env_var') + write_output("%s:", "env_var") with indent_log(): for key, value in sorted(self.configuration.get_environ_vars()): - env_var = f'PIP_{key.upper()}' + env_var = f"PIP_{key.upper()}" write_output("%s=%r", env_var, value) - def open_in_editor(self, options, args): - # type: (Values, List[str]) -> None + def open_in_editor(self, options: Values, args: List[str]) -> None: editor = self._determine_editor(options) fname = self.configuration.get_file_to_edit() if fname is None: raise PipError("Could not determine appropriate file.") + elif '"' in fname: + # This shouldn't happen, unless we see a username like that. + # If that happens, we'd appreciate a pull request fixing this. 
+ raise PipError( + f'Can not open an editor for a file name containing "\n{fname}' + ) try: - subprocess.check_call([editor, fname]) + subprocess.check_call(f'{editor} "{fname}"', shell=True) + except FileNotFoundError as e: + if not e.filename: + e.filename = editor + raise except subprocess.CalledProcessError as e: raise PipError( - "Editor Subprocess exited with exit code {}" - .format(e.returncode) + "Editor Subprocess exited with exit code {}".format(e.returncode) ) - def _get_n_args(self, args, example, n): - # type: (List[str], str, int) -> Any - """Helper to make sure the command got the right number of arguments - """ + def _get_n_args(self, args: List[str], example: str, n: int) -> Any: + """Helper to make sure the command got the right number of arguments""" if len(args) != n: msg = ( - 'Got unexpected number of arguments, expected {}. ' + "Got unexpected number of arguments, expected {}. " '(example: "{} config {}")' ).format(n, get_prog(), example) raise PipError(msg) @@ -256,8 +260,7 @@ class ConfigurationCommand(Command): else: return args - def _save_configuration(self): - # type: () -> None + def _save_configuration(self) -> None: # We successfully ran a modifying command. Need to save the # configuration. try: @@ -268,8 +271,7 @@ class ConfigurationCommand(Command): ) raise PipError("Internal Error.") - def _determine_editor(self, options): - # type: (Values) -> str + def _determine_editor(self, options: Values) -> str: if options.editor is not None: return options.editor elif "VISUAL" in os.environ: diff --git a/env/Lib/site-packages/pip/_internal/commands/debug.py b/env/Lib/site-packages/pip/_internal/commands/debug.py index ead5119a27a0c0e26d8c762826360b9e6b2a99a7..2a3e7d298f393ed8532e4f11913635efc94cb329 100644 --- a/env/Lib/site-packages/pip/_internal/commands/debug.py +++ b/env/Lib/site-packages/pip/_internal/commands/debug.py @@ -1,3 +1,4 @@ +import importlib.resources import locale import logging import os @@ -10,7 +11,6 @@ import pip._vendor from pip._vendor.certifi import where from pip._vendor.packaging.version import parse as parse_version -from pip import __file__ as pip_location from pip._internal.cli import cmdoptions from pip._internal.cli.base_command import Command from pip._internal.cli.cmdoptions import make_target_python @@ -23,61 +23,47 @@ from pip._internal.utils.misc import get_pip_version logger = logging.getLogger(__name__) -def show_value(name, value): - # type: (str, Any) -> None - logger.info('%s: %s', name, value) +def show_value(name: str, value: Any) -> None: + logger.info("%s: %s", name, value) -def show_sys_implementation(): - # type: () -> None - logger.info('sys.implementation:') +def show_sys_implementation() -> None: + logger.info("sys.implementation:") implementation_name = sys.implementation.name with indent_log(): - show_value('name', implementation_name) + show_value("name", implementation_name) -def create_vendor_txt_map(): - # type: () -> Dict[str, str] - vendor_txt_path = os.path.join( - os.path.dirname(pip_location), - '_vendor', - 'vendor.txt' - ) - - with open(vendor_txt_path) as f: +def create_vendor_txt_map() -> Dict[str, str]: + with importlib.resources.open_text("pip._vendor", "vendor.txt") as f: # Purge non version specifying lines. # Also, remove any space prefix or suffixes (including comments). - lines = [line.strip().split(' ', 1)[0] - for line in f.readlines() if '==' in line] + lines = [ + line.strip().split(" ", 1)[0] for line in f.readlines() if "==" in line + ] # Transform into "module" -> version dict. 
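For reference, the whole vendor.txt parse reduces to two splits per line; a self-contained sketch with illustrative file contents:

```python
sample = """
CacheControl==0.12.11
    # pinned versions live here (comment line, skipped)
requests==2.28.1  # vendored
"""

lines = [
    line.strip().split(" ", 1)[0]  # drop trailing comments and spaces
    for line in sample.splitlines()
    if "==" in line                # keep only version-pinning lines
]
pins = dict(line.split("==", 1) for line in lines)
print(pins)  # {'CacheControl': '0.12.11', 'requests': '2.28.1'}
```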
- return dict(line.split('==', 1) for line in lines) # type: ignore + return dict(line.split("==", 1) for line in lines) -def get_module_from_module_name(module_name): - # type: (str) -> ModuleType +def get_module_from_module_name(module_name: str) -> ModuleType: # Module name can be uppercase in vendor.txt for some reason... - module_name = module_name.lower() + module_name = module_name.lower().replace("-", "_") # PATCH: setuptools is actually only pkg_resources. - if module_name == 'setuptools': - module_name = 'pkg_resources' - - __import__( - f'pip._vendor.{module_name}', - globals(), - locals(), - level=0 - ) + if module_name == "setuptools": + module_name = "pkg_resources" + + __import__(f"pip._vendor.{module_name}", globals(), locals(), level=0) return getattr(pip._vendor, module_name) -def get_vendor_version_from_module(module_name): - # type: (str) -> Optional[str] +def get_vendor_version_from_module(module_name: str) -> Optional[str]: module = get_module_from_module_name(module_name) - version = getattr(module, '__version__', None) + version = getattr(module, "__version__", None) if not version: # Try to find version in debundled module info. + assert module.__file__ is not None env = get_environment([os.path.dirname(module.__file__)]) dist = env.get_distribution(module_name) if dist: @@ -86,35 +72,36 @@ def get_vendor_version_from_module(module_name): return version -def show_actual_vendor_versions(vendor_txt_versions): - # type: (Dict[str, str]) -> None +def show_actual_vendor_versions(vendor_txt_versions: Dict[str, str]) -> None: """Log the actual version and print extra info if there is a conflict or if the actual version could not be imported. """ for module_name, expected_version in vendor_txt_versions.items(): - extra_message = '' + extra_message = "" actual_version = get_vendor_version_from_module(module_name) if not actual_version: - extra_message = ' (Unable to locate actual module version, using'\ - ' vendor.txt specified version)' + extra_message = ( + " (Unable to locate actual module version, using" + " vendor.txt specified version)" + ) actual_version = expected_version elif parse_version(actual_version) != parse_version(expected_version): - extra_message = ' (CONFLICT: vendor.txt suggests version should'\ - ' be {})'.format(expected_version) - logger.info('%s==%s%s', module_name, actual_version, extra_message) + extra_message = ( + " (CONFLICT: vendor.txt suggests version should" + " be {})".format(expected_version) + ) + logger.info("%s==%s%s", module_name, actual_version, extra_message) -def show_vendor_versions(): - # type: () -> None - logger.info('vendored library versions:') +def show_vendor_versions() -> None: + logger.info("vendored library versions:") vendor_txt_versions = create_vendor_txt_map() with indent_log(): show_actual_vendor_versions(vendor_txt_versions) -def show_tags(options): - # type: (Values) -> None +def show_tags(options: Values) -> None: tag_limit = 10 target_python = make_target_python(options) @@ -122,11 +109,11 @@ def show_tags(options): # Display the target options that were explicitly provided. 
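The tag list this command prints can be reproduced with the standalone `packaging` library (assumed installed; pip itself uses its vendored copy):

```python
from packaging.tags import sys_tags  # pip vendors this same module

tags = list(sys_tags())
print(f"Compatible tags: {len(tags)}")
for tag in tags[:10]:  # the command shows the first 10 unless --verbose is given
    print(f"  {tag}")  # e.g. cp311-cp311-manylinux_2_17_x86_64
```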
formatted_target = target_python.format_given() - suffix = '' + suffix = "" if formatted_target: - suffix = f' (target: {formatted_target})' + suffix = f" (target: {formatted_target})" - msg = 'Compatible tags: {}{}'.format(len(tags), suffix) + msg = "Compatible tags: {}{}".format(len(tags), suffix) logger.info(msg) if options.verbose < 1 and len(tags) > tag_limit: @@ -141,30 +128,28 @@ def show_tags(options): if tags_limited: msg = ( - '...\n' - '[First {tag_limit} tags shown. Pass --verbose to show all.]' + "...\n[First {tag_limit} tags shown. Pass --verbose to show all.]" ).format(tag_limit=tag_limit) logger.info(msg) -def ca_bundle_info(config): - # type: (Configuration) -> str +def ca_bundle_info(config: Configuration) -> str: levels = set() for key, _ in config.items(): - levels.add(key.split('.')[0]) + levels.add(key.split(".")[0]) if not levels: return "Not specified" - levels_that_override_global = ['install', 'wheel', 'download'] + levels_that_override_global = ["install", "wheel", "download"] global_overriding_level = [ level for level in levels if level in levels_that_override_global ] if not global_overriding_level: - return 'global' + return "global" - if 'global' in levels: - levels.remove('global') + if "global" in levels: + levels.remove("global") return ", ".join(levels) @@ -177,34 +162,33 @@ class DebugCommand(Command): %prog <options>""" ignore_require_venv = True - def add_options(self): - # type: () -> None + def add_options(self) -> None: cmdoptions.add_target_python_options(self.cmd_opts) self.parser.insert_option_group(0, self.cmd_opts) self.parser.config.load() - def run(self, options, args): - # type: (Values, List[str]) -> int + def run(self, options: Values, args: List[str]) -> int: logger.warning( "This command is only meant for debugging. " "Do not use this with automation for parsing and getting these " "details, since the output and options of this command may " "change without notice." 
) - show_value('pip version', get_pip_version()) - show_value('sys.version', sys.version) - show_value('sys.executable', sys.executable) - show_value('sys.getdefaultencoding', sys.getdefaultencoding()) - show_value('sys.getfilesystemencoding', sys.getfilesystemencoding()) + show_value("pip version", get_pip_version()) + show_value("sys.version", sys.version) + show_value("sys.executable", sys.executable) + show_value("sys.getdefaultencoding", sys.getdefaultencoding()) + show_value("sys.getfilesystemencoding", sys.getfilesystemencoding()) show_value( - 'locale.getpreferredencoding', locale.getpreferredencoding(), + "locale.getpreferredencoding", + locale.getpreferredencoding(), ) - show_value('sys.platform', sys.platform) + show_value("sys.platform", sys.platform) show_sys_implementation() show_value("'cert' config value", ca_bundle_info(self.parser.config)) - show_value("REQUESTS_CA_BUNDLE", os.environ.get('REQUESTS_CA_BUNDLE')) - show_value("CURL_CA_BUNDLE", os.environ.get('CURL_CA_BUNDLE')) + show_value("REQUESTS_CA_BUNDLE", os.environ.get("REQUESTS_CA_BUNDLE")) + show_value("CURL_CA_BUNDLE", os.environ.get("CURL_CA_BUNDLE")) show_value("pip._vendor.certifi.where()", where()) show_value("pip._vendor.DEBUNDLED", pip._vendor.DEBUNDLED) diff --git a/env/Lib/site-packages/pip/_internal/commands/download.py b/env/Lib/site-packages/pip/_internal/commands/download.py index 19f8d6c02758398cbbfefb5e3097d0152906e34c..54247a78a654187206cd17a403913c6257ffcc7d 100644 --- a/env/Lib/site-packages/pip/_internal/commands/download.py +++ b/env/Lib/site-packages/pip/_internal/commands/download.py @@ -7,7 +7,8 @@ from pip._internal.cli import cmdoptions from pip._internal.cli.cmdoptions import make_target_python from pip._internal.cli.req_command import RequirementCommand, with_cleanup from pip._internal.cli.status_codes import SUCCESS -from pip._internal.req.req_tracker import get_requirement_tracker +from pip._internal.operations.build.build_tracker import get_build_tracker +from pip._internal.req.req_install import check_legacy_setup_py_options from pip._internal.utils.misc import ensure_dir, normalize_path, write_output from pip._internal.utils.temp_dir import TempDirectory @@ -34,11 +35,9 @@ class DownloadCommand(RequirementCommand): %prog [options] <local project path> ... 
%prog [options] <archive url/path> ...""" - def add_options(self): - # type: () -> None + def add_options(self) -> None: self.cmd_opts.add_option(cmdoptions.constraints()) self.cmd_opts.add_option(cmdoptions.requirements()) - self.cmd_opts.add_option(cmdoptions.build_dir()) self.cmd_opts.add_option(cmdoptions.no_deps()) self.cmd_opts.add_option(cmdoptions.global_options()) self.cmd_opts.add_option(cmdoptions.no_binary()) @@ -51,14 +50,18 @@ class DownloadCommand(RequirementCommand): self.cmd_opts.add_option(cmdoptions.no_build_isolation()) self.cmd_opts.add_option(cmdoptions.use_pep517()) self.cmd_opts.add_option(cmdoptions.no_use_pep517()) + self.cmd_opts.add_option(cmdoptions.check_build_deps()) self.cmd_opts.add_option(cmdoptions.ignore_requires_python()) self.cmd_opts.add_option( - '-d', '--dest', '--destination-dir', '--destination-directory', - dest='download_dir', - metavar='dir', + "-d", + "--dest", + "--destination-dir", + "--destination-directory", + dest="download_dir", + metavar="dir", default=os.curdir, - help=("Download packages into <dir>."), + help="Download packages into <dir>.", ) cmdoptions.add_target_python_options(self.cmd_opts) @@ -72,9 +75,7 @@ class DownloadCommand(RequirementCommand): self.parser.insert_option_group(0, self.cmd_opts) @with_cleanup - def run(self, options, args): - # type: (Values, List[str]) -> int - + def run(self, options: Values, args: List[str]) -> int: options.ignore_installed = True # editable doesn't really make sense for `pip download`, but the bowels # of the RequirementSet code require that property. @@ -95,7 +96,7 @@ class DownloadCommand(RequirementCommand): ignore_requires_python=options.ignore_requires_python, ) - req_tracker = self.enter_context(get_requirement_tracker()) + build_tracker = self.enter_context(get_build_tracker()) directory = TempDirectory( delete=not options.no_clean, @@ -104,15 +105,17 @@ class DownloadCommand(RequirementCommand): ) reqs = self.get_requirements(args, options, finder, session) + check_legacy_setup_py_options(options, reqs) preparer = self.make_requirement_preparer( temp_build_dir=directory, options=options, - req_tracker=req_tracker, + build_tracker=build_tracker, session=session, finder=finder, download_dir=options.download_dir, use_user_site=False, + verbosity=self.verbosity, ) resolver = self.make_resolver( @@ -120,22 +123,25 @@ class DownloadCommand(RequirementCommand): finder=finder, options=options, ignore_requires_python=options.ignore_requires_python, + use_pep517=options.use_pep517, py_version_info=options.python_version, ) self.trace_basic_info(finder) - requirement_set = resolver.resolve( - reqs, check_supported_wheels=True - ) + requirement_set = resolver.resolve(reqs, check_supported_wheels=True) - downloaded = [] # type: List[str] + downloaded: List[str] = [] for req in requirement_set.requirements.values(): if req.satisfied_by is None: assert req.name is not None preparer.save_linked_requirement(req) downloaded.append(req.name) + + preparer.prepare_linked_requirements_more(requirement_set.requirements.values()) + requirement_set.warn_legacy_versions_and_specifiers() + if downloaded: - write_output('Successfully downloaded %s', ' '.join(downloaded)) + write_output("Successfully downloaded %s", " ".join(downloaded)) return SUCCESS diff --git a/env/Lib/site-packages/pip/_internal/commands/freeze.py b/env/Lib/site-packages/pip/_internal/commands/freeze.py index 430d1018f04235f61eab17807115db7d65390c00..fd9d88a8b017d6c1f2600b71812977e80d36d9bd 100644 --- 
a/env/Lib/site-packages/pip/_internal/commands/freeze.py +++ b/env/Lib/site-packages/pip/_internal/commands/freeze.py @@ -1,15 +1,25 @@ import sys from optparse import Values -from typing import List +from typing import AbstractSet, List from pip._internal.cli import cmdoptions from pip._internal.cli.base_command import Command from pip._internal.cli.status_codes import SUCCESS from pip._internal.operations.freeze import freeze from pip._internal.utils.compat import stdlib_pkgs -from pip._internal.utils.deprecation import deprecated -DEV_PKGS = {'pip', 'setuptools', 'distribute', 'wheel'} + +def _should_suppress_build_backends() -> bool: + return sys.version_info < (3, 12) + + +def _dev_pkgs() -> AbstractSet[str]: + pkgs = {"pip"} + + if _should_suppress_build_backends(): + pkgs |= {"setuptools", "distribute", "wheel"} + + return pkgs class FreezeCommand(Command): @@ -23,76 +33,70 @@ class FreezeCommand(Command): %prog [options]""" log_streams = ("ext://sys.stderr", "ext://sys.stderr") - def add_options(self): - # type: () -> None - self.cmd_opts.add_option( - '-r', '--requirement', - dest='requirements', - action='append', - default=[], - metavar='file', - help="Use the order in the given requirements file and its " - "comments when generating output. This option can be " - "used multiple times.") + def add_options(self) -> None: self.cmd_opts.add_option( - '-f', '--find-links', - dest='find_links', - action='append', + "-r", + "--requirement", + dest="requirements", + action="append", default=[], - metavar='URL', - help='URL for finding packages, which will be added to the ' - 'output.') + metavar="file", + help=( + "Use the order in the given requirements file and its " + "comments when generating output. This option can be " + "used multiple times." + ), + ) self.cmd_opts.add_option( - '-l', '--local', - dest='local', - action='store_true', + "-l", + "--local", + dest="local", + action="store_true", default=False, - help='If in a virtualenv that has global access, do not output ' - 'globally-installed packages.') + help=( + "If in a virtualenv that has global access, do not output " + "globally-installed packages." 
+ ), + ) self.cmd_opts.add_option( - '--user', - dest='user', - action='store_true', + "--user", + dest="user", + action="store_true", default=False, - help='Only output packages installed in user-site.') + help="Only output packages installed in user-site.", + ) self.cmd_opts.add_option(cmdoptions.list_path()) self.cmd_opts.add_option( - '--all', - dest='freeze_all', - action='store_true', - help='Do not skip these packages in the output:' - ' {}'.format(', '.join(DEV_PKGS))) + "--all", + dest="freeze_all", + action="store_true", + help=( + "Do not skip these packages in the output:" + " {}".format(", ".join(_dev_pkgs())) + ), + ) self.cmd_opts.add_option( - '--exclude-editable', - dest='exclude_editable', - action='store_true', - help='Exclude editable package from output.') + "--exclude-editable", + dest="exclude_editable", + action="store_true", + help="Exclude editable package from output.", + ) self.cmd_opts.add_option(cmdoptions.list_exclude()) self.parser.insert_option_group(0, self.cmd_opts) - def run(self, options, args): - # type: (Values, List[str]) -> int + def run(self, options: Values, args: List[str]) -> int: skip = set(stdlib_pkgs) if not options.freeze_all: - skip.update(DEV_PKGS) + skip.update(_dev_pkgs()) if options.excludes: skip.update(options.excludes) cmdoptions.check_list_path_option(options) - if options.find_links: - deprecated( - "--find-links option in pip freeze is deprecated.", - replacement=None, - gone_in="21.2", - issue=9069, - ) - for line in freeze( requirement=options.requirements, - find_links=options.find_links, local_only=options.local, user_only=options.user, paths=options.path, @@ -100,5 +104,5 @@ class FreezeCommand(Command): skip=skip, exclude_editable=options.exclude_editable, ): - sys.stdout.write(line + '\n') + sys.stdout.write(line + "\n") return SUCCESS diff --git a/env/Lib/site-packages/pip/_internal/commands/hash.py b/env/Lib/site-packages/pip/_internal/commands/hash.py index bca48dcc078f476828609fd52521d0001252db30..042dac813e74b8187c3754cb9a937c7f7183e331 100644 --- a/env/Lib/site-packages/pip/_internal/commands/hash.py +++ b/env/Lib/site-packages/pip/_internal/commands/hash.py @@ -20,38 +20,39 @@ class HashCommand(Command): installs. """ - usage = '%prog [options] <file> ...' + usage = "%prog [options] <file> ..." 
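The digest itself is plain `hashlib`, streamed over the file so memory stays bounded; a minimal sketch equivalent to the `_hash_of_file` helper below (the chunk size is an illustrative choice):

```python
import hashlib


def hash_of_file(path: str, algorithm: str = "sha256") -> str:
    """Hex digest of a file, read in chunks to bound memory use."""
    h = hashlib.new(algorithm)
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(8192), b""):
            h.update(chunk)
    return h.hexdigest()


# Usable in a requirements file for repeatable installs:
#   somepkg==1.0 --hash=sha256:<digest>
```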
ignore_require_venv = True - def add_options(self): - # type: () -> None + def add_options(self) -> None: self.cmd_opts.add_option( - '-a', '--algorithm', - dest='algorithm', + "-a", + "--algorithm", + dest="algorithm", choices=STRONG_HASHES, - action='store', + action="store", default=FAVORITE_HASH, - help='The hash algorithm to use: one of {}'.format( - ', '.join(STRONG_HASHES))) + help="The hash algorithm to use: one of {}".format( + ", ".join(STRONG_HASHES) + ), + ) self.parser.insert_option_group(0, self.cmd_opts) - def run(self, options, args): - # type: (Values, List[str]) -> int + def run(self, options: Values, args: List[str]) -> int: if not args: self.parser.print_usage(sys.stderr) return ERROR algorithm = options.algorithm for path in args: - write_output('%s:\n--hash=%s:%s', - path, algorithm, _hash_of_file(path, algorithm)) + write_output( + "%s:\n--hash=%s:%s", path, algorithm, _hash_of_file(path, algorithm) + ) return SUCCESS -def _hash_of_file(path, algorithm): - # type: (str, str) -> str +def _hash_of_file(path: str, algorithm: str) -> str: """Return the hash digest of a file.""" - with open(path, 'rb') as archive: + with open(path, "rb") as archive: hash = hashlib.new(algorithm) for chunk in read_chunks(archive): hash.update(chunk) diff --git a/env/Lib/site-packages/pip/_internal/commands/help.py b/env/Lib/site-packages/pip/_internal/commands/help.py index 79d0eb49b1a526ab0c0cff8a70798bdd4fe53c8a..62066318b74dcc5c32bcd24b9493fb34d1ce52d7 100644 --- a/env/Lib/site-packages/pip/_internal/commands/help.py +++ b/env/Lib/site-packages/pip/_internal/commands/help.py @@ -13,8 +13,7 @@ class HelpCommand(Command): %prog <command>""" ignore_require_venv = True - def run(self, options, args): - # type: (Values, List[str]) -> int + def run(self, options: Values, args: List[str]) -> int: from pip._internal.commands import ( commands_dict, create_command, @@ -34,7 +33,7 @@ class HelpCommand(Command): if guess: msg.append(f'maybe you meant "{guess}"') - raise CommandError(' - '.join(msg)) + raise CommandError(" - ".join(msg)) command = create_command(cmd_name) command.parser.print_help() diff --git a/env/Lib/site-packages/pip/_internal/commands/install.py b/env/Lib/site-packages/pip/_internal/commands/install.py index 6932f5a6d8b680dda9893633b793f70bae8dc36c..f6a300804f4a99eb79b4f3a1ee676251c30e629f 100644 --- a/env/Lib/site-packages/pip/_internal/commands/install.py +++ b/env/Lib/site-packages/pip/_internal/commands/install.py @@ -1,13 +1,13 @@ import errno -import logging +import json import operator import os import shutil import site from optparse import SUPPRESS_HELP, Values -from typing import Iterable, List, Optional +from typing import List, Optional -from pip._vendor.packaging.utils import canonicalize_name +from pip._vendor.rich import print_json from pip._internal.cache import WheelCache from pip._internal.cli import cmdoptions @@ -21,14 +21,19 @@ from pip._internal.cli.status_codes import ERROR, SUCCESS from pip._internal.exceptions import CommandError, InstallationError from pip._internal.locations import get_scheme from pip._internal.metadata import get_environment -from pip._internal.models.format_control import FormatControl +from pip._internal.models.installation_report import InstallationReport +from pip._internal.operations.build.build_tracker import get_build_tracker from pip._internal.operations.check import ConflictDetails, check_install_conflicts from pip._internal.req import install_given_reqs -from pip._internal.req.req_install import InstallRequirement -from 
pip._internal.req.req_tracker import get_requirement_tracker -from pip._internal.utils.distutils_args import parse_distutils_args +from pip._internal.req.req_install import ( + InstallRequirement, + check_legacy_setup_py_options, +) +from pip._internal.utils.compat import WINDOWS from pip._internal.utils.filesystem import test_writable_dir +from pip._internal.utils.logging import getLogger from pip._internal.utils.misc import ( + check_externally_managed, ensure_dir, get_pip_version, protect_pip_from_modification_on_windows, @@ -39,24 +44,9 @@ from pip._internal.utils.virtualenv import ( running_under_virtualenv, virtualenv_no_global, ) -from pip._internal.wheel_builder import ( - BinaryAllowedPredicate, - build, - should_build_for_install_command, -) - -logger = logging.getLogger(__name__) - +from pip._internal.wheel_builder import build, should_build_for_install_command -def get_check_binary_allowed(format_control): - # type: (FormatControl) -> BinaryAllowedPredicate - def check_binary_allowed(req): - # type: (InstallRequirement) -> bool - canonical_name = canonicalize_name(req.name or "") - allowed_formats = format_control.get_allowed_formats(canonical_name) - return "binary" in allowed_formats - - return check_binary_allowed +logger = getLogger(__name__) class InstallCommand(RequirementCommand): @@ -79,8 +69,7 @@ class InstallCommand(RequirementCommand): %prog [options] [-e] <local project path> ... %prog [options] <archive url/path> ...""" - def add_options(self): - # type: () -> None + def add_options(self) -> None: self.cmd_opts.add_option(cmdoptions.requirements()) self.cmd_opts.add_option(cmdoptions.constraints()) self.cmd_opts.add_option(cmdoptions.no_deps()) @@ -88,95 +77,129 @@ class InstallCommand(RequirementCommand): self.cmd_opts.add_option(cmdoptions.editable()) self.cmd_opts.add_option( - '-t', '--target', - dest='target_dir', - metavar='dir', + "--dry-run", + action="store_true", + dest="dry_run", + default=False, + help=( + "Don't actually install anything, just print what would be. " + "Can be used in combination with --ignore-installed " + "to 'resolve' the requirements." + ), + ) + self.cmd_opts.add_option( + "-t", + "--target", + dest="target_dir", + metavar="dir", default=None, - help='Install packages into <dir>. ' - 'By default this will not replace existing files/folders in ' - '<dir>. Use --upgrade to replace existing packages in <dir> ' - 'with new versions.' + help=( + "Install packages into <dir>. " + "By default this will not replace existing files/folders in " + "<dir>. Use --upgrade to replace existing packages in <dir> " + "with new versions." + ), ) cmdoptions.add_target_python_options(self.cmd_opts) self.cmd_opts.add_option( - '--user', - dest='use_user_site', - action='store_true', - help="Install to the Python user install directory for your " - "platform. Typically ~/.local/, or %APPDATA%\\Python on " - "Windows. (See the Python documentation for site.USER_BASE " - "for full details.)") + "--user", + dest="use_user_site", + action="store_true", + help=( + "Install to the Python user install directory for your " + "platform. Typically ~/.local/, or %APPDATA%\\Python on " + "Windows. 
(See the Python documentation for site.USER_BASE " + "for full details.)" + ), + ) self.cmd_opts.add_option( - '--no-user', - dest='use_user_site', - action='store_false', - help=SUPPRESS_HELP) + "--no-user", + dest="use_user_site", + action="store_false", + help=SUPPRESS_HELP, + ) self.cmd_opts.add_option( - '--root', - dest='root_path', - metavar='dir', + "--root", + dest="root_path", + metavar="dir", default=None, - help="Install everything relative to this alternate root " - "directory.") + help="Install everything relative to this alternate root directory.", + ) self.cmd_opts.add_option( - '--prefix', - dest='prefix_path', - metavar='dir', + "--prefix", + dest="prefix_path", + metavar="dir", default=None, - help="Installation prefix where lib, bin and other top-level " - "folders are placed") - - self.cmd_opts.add_option(cmdoptions.build_dir()) + help=( + "Installation prefix where lib, bin and other top-level " + "folders are placed. Note that the resulting installation may " + "contain scripts and other resources which reference the " + "Python interpreter of pip, and not that of ``--prefix``. " + "See also the ``--python`` option if the intention is to " + "install packages into another (possibly pip-free) " + "environment." + ), + ) self.cmd_opts.add_option(cmdoptions.src()) self.cmd_opts.add_option( - '-U', '--upgrade', - dest='upgrade', - action='store_true', - help='Upgrade all specified packages to the newest available ' - 'version. The handling of dependencies depends on the ' - 'upgrade-strategy used.' + "-U", + "--upgrade", + dest="upgrade", + action="store_true", + help=( + "Upgrade all specified packages to the newest available " + "version. The handling of dependencies depends on the " + "upgrade-strategy used." + ), ) self.cmd_opts.add_option( - '--upgrade-strategy', - dest='upgrade_strategy', - default='only-if-needed', - choices=['only-if-needed', 'eager'], - help='Determines how dependency upgrading should be handled ' - '[default: %default]. ' - '"eager" - dependencies are upgraded regardless of ' - 'whether the currently installed version satisfies the ' - 'requirements of the upgraded package(s). ' - '"only-if-needed" - are upgraded only when they do not ' - 'satisfy the requirements of the upgraded package(s).' + "--upgrade-strategy", + dest="upgrade_strategy", + default="only-if-needed", + choices=["only-if-needed", "eager"], + help=( + "Determines how dependency upgrading should be handled " + "[default: %default]. " + '"eager" - dependencies are upgraded regardless of ' + "whether the currently installed version satisfies the " + "requirements of the upgraded package(s). " + '"only-if-needed" - are upgraded only when they do not ' + "satisfy the requirements of the upgraded package(s)." + ), ) self.cmd_opts.add_option( - '--force-reinstall', - dest='force_reinstall', - action='store_true', - help='Reinstall all packages even if they are already ' - 'up-to-date.') + "--force-reinstall", + dest="force_reinstall", + action="store_true", + help="Reinstall all packages even if they are already up-to-date.", + ) self.cmd_opts.add_option( - '-I', '--ignore-installed', - dest='ignore_installed', - action='store_true', - help='Ignore the installed packages, overwriting them. ' - 'This can break your system if the existing package ' - 'is of a different version or was installed ' - 'with a different package manager!' + "-I", + "--ignore-installed", + dest="ignore_installed", + action="store_true", + help=( + "Ignore the installed packages, overwriting them. 
" + "This can break your system if the existing package " + "is of a different version or was installed " + "with a different package manager!" + ), ) self.cmd_opts.add_option(cmdoptions.ignore_requires_python()) self.cmd_opts.add_option(cmdoptions.no_build_isolation()) self.cmd_opts.add_option(cmdoptions.use_pep517()) self.cmd_opts.add_option(cmdoptions.no_use_pep517()) + self.cmd_opts.add_option(cmdoptions.check_build_deps()) + self.cmd_opts.add_option(cmdoptions.override_externally_managed()) - self.cmd_opts.add_option(cmdoptions.install_options()) + self.cmd_opts.add_option(cmdoptions.config_settings()) self.cmd_opts.add_option(cmdoptions.global_options()) self.cmd_opts.add_option( @@ -208,12 +231,12 @@ class InstallCommand(RequirementCommand): default=True, help="Do not warn about broken dependencies", ) - self.cmd_opts.add_option(cmdoptions.no_binary()) self.cmd_opts.add_option(cmdoptions.only_binary()) self.cmd_opts.add_option(cmdoptions.prefer_binary()) self.cmd_opts.add_option(cmdoptions.require_hashes()) self.cmd_opts.add_option(cmdoptions.progress_bar()) + self.cmd_opts.add_option(cmdoptions.root_user_action()) index_opts = cmdoptions.make_option_group( cmdoptions.index_group, @@ -223,22 +246,51 @@ class InstallCommand(RequirementCommand): self.parser.insert_option_group(0, index_opts) self.parser.insert_option_group(0, self.cmd_opts) + self.cmd_opts.add_option( + "--report", + dest="json_report_file", + metavar="file", + default=None, + help=( + "Generate a JSON file describing what pip did to install " + "the provided requirements. " + "Can be used in combination with --dry-run and --ignore-installed " + "to 'resolve' the requirements. " + "When - is used as file name it writes to stdout. " + "When writing to stdout, please combine with the --quiet option " + "to avoid mixing pip logging output with JSON output." + ), + ) + @with_cleanup - def run(self, options, args): - # type: (Values, List[str]) -> int + def run(self, options: Values, args: List[str]) -> int: if options.use_user_site and options.target_dir is not None: raise CommandError("Can not combine '--user' and '--target'") - cmdoptions.check_install_build_global(options) + # Check whether the environment we're installing into is externally + # managed, as specified in PEP 668. Specifying --root, --target, or + # --prefix disables the check, since there's no reliable way to locate + # the EXTERNALLY-MANAGED file for those cases. An exception is also + # made specifically for "--dry-run --report" for convenience. 
+ installing_into_current_environment = ( + not (options.dry_run and options.json_report_file) + and options.root_path is None + and options.target_dir is None + and options.prefix_path is None + ) + if ( + installing_into_current_environment + and not options.override_externally_managed + ): + check_externally_managed() + upgrade_strategy = "to-satisfy-only" if options.upgrade: upgrade_strategy = options.upgrade_strategy cmdoptions.check_dist_restriction(options, check_target=True) - install_options = options.install_options or [] - - logger.debug("Using %s", get_pip_version()) + logger.verbose("Using %s", get_pip_version()) options.use_user_site = decide_user_install( options.use_user_site, prefix_path=options.prefix_path, @@ -247,16 +299,19 @@ class InstallCommand(RequirementCommand): isolated_mode=options.isolated_mode, ) - target_temp_dir = None # type: Optional[TempDirectory] - target_temp_dir_path = None # type: Optional[str] + target_temp_dir: Optional[TempDirectory] = None + target_temp_dir_path: Optional[str] = None if options.target_dir: options.ignore_installed = True options.target_dir = os.path.abspath(options.target_dir) - if (os.path.exists(options.target_dir) and not - os.path.isdir(options.target_dir)): + if ( + # fmt: off + os.path.exists(options.target_dir) and + not os.path.isdir(options.target_dir) + # fmt: on + ): raise CommandError( - "Target path exists but is not a directory, will not " - "continue." + "Target path exists but is not a directory, will not continue." ) # Create a target directory for using with the target option @@ -275,9 +330,7 @@ class InstallCommand(RequirementCommand): target_python=target_python, ignore_requires_python=options.ignore_requires_python, ) - wheel_cache = WheelCache(options.cache_dir, options.format_control) - - req_tracker = self.enter_context(get_requirement_tracker()) + build_tracker = self.enter_context(get_build_tracker()) directory = TempDirectory( delete=not options.no_clean, @@ -287,18 +340,24 @@ class InstallCommand(RequirementCommand): try: reqs = self.get_requirements(args, options, finder, session) + check_legacy_setup_py_options(options, reqs) - reject_location_related_install_options( - reqs, options.install_options - ) + wheel_cache = WheelCache(options.cache_dir) + + # Only when installing is it permitted to use PEP 660. + # In other circumstances (pip wheel, pip download) we generate + # regular (i.e. non editable) metadata and wheels. + for req in reqs: + req.permit_editable_wheels = True preparer = self.make_requirement_preparer( temp_build_dir=directory, options=options, - req_tracker=req_tracker, + build_tracker=build_tracker, session=session, finder=finder, use_user_site=options.use_user_site, + verbosity=self.verbosity, ) resolver = self.make_resolver( preparer=preparer, @@ -319,6 +378,29 @@ class InstallCommand(RequirementCommand): reqs, check_supported_wheels=not options.target_dir ) + if options.json_report_file: + report = InstallationReport(requirement_set.requirements_to_install) + if options.json_report_file == "-": + print_json(data=report.to_dict()) + else: + with open(options.json_report_file, "w", encoding="utf-8") as f: + json.dump(report.to_dict(), f, indent=2, ensure_ascii=False) + + if options.dry_run: + # In non dry-run mode, the legacy versions and specifiers check + # will be done as part of conflict detection. 
+ requirement_set.warn_legacy_versions_and_specifiers() + would_install_items = sorted( + (r.metadata["name"], r.metadata["version"]) + for r in requirement_set.requirements_to_install + ) + if would_install_items: + write_output( + "Would install %s", + " ".join("-".join(item) for item in would_install_items), + ) + return SUCCESS + try: pip_req = requirement_set.get_requirement("pip") except KeyError: @@ -327,19 +409,12 @@ class InstallCommand(RequirementCommand): # If we're not replacing an already installed pip, # we're not modifying it. modifying_pip = pip_req.satisfied_by is None - protect_pip_from_modification_on_windows( - modifying_pip=modifying_pip - ) - - check_binary_allowed = get_check_binary_allowed( - finder.format_control - ) + protect_pip_from_modification_on_windows(modifying_pip=modifying_pip) reqs_to_build = [ - r for r in requirement_set.requirements.values() - if should_build_for_install_command( - r, check_binary_allowed - ) + r + for r in requirement_set.requirements.values() + if should_build_for_install_command(r) ] _, build_failures = build( @@ -347,52 +422,35 @@ class InstallCommand(RequirementCommand): wheel_cache=wheel_cache, verify=True, build_options=[], - global_options=[], + global_options=global_options, ) - # If we're using PEP 517, we cannot do a direct install - # so we fail here. - pep517_build_failure_names = [ - r.name # type: ignore - for r in build_failures if r.use_pep517 - ] # type: List[str] - if pep517_build_failure_names: + if build_failures: raise InstallationError( - "Could not build wheels for {} which use" - " PEP 517 and cannot be installed directly".format( - ", ".join(pep517_build_failure_names) + "Could not build wheels for {}, which is required to " + "install pyproject.toml-based projects".format( + ", ".join(r.name for r in build_failures) # type: ignore ) ) - # For now, we just warn about failures building legacy - # requirements, as we'll fall through to a direct - # install for those. - for r in build_failures: - if not r.use_pep517: - r.legacy_install_reason = 8368 - - to_install = resolver.get_installation_order( - requirement_set - ) + to_install = resolver.get_installation_order(requirement_set) # Check for conflicts in the package set we're installing. 
- conflicts = None # type: Optional[ConflictDetails] + conflicts: Optional[ConflictDetails] = None should_warn_about_conflicts = ( - not options.ignore_dependencies and - options.warn_about_conflicts + not options.ignore_dependencies and options.warn_about_conflicts ) if should_warn_about_conflicts: conflicts = self._determine_conflicts(to_install) # Don't warn about script install locations if - # --target has been specified + # --target or --prefix has been specified warn_script_location = options.warn_script_location - if options.target_dir: + if options.target_dir or options.prefix_path: warn_script_location = False installed = install_given_reqs( to_install, - install_options, global_options, root=options.root_path, home=target_temp_dir_path, @@ -411,7 +469,7 @@ class InstallCommand(RequirementCommand): ) env = get_environment(lib_locations) - installed.sort(key=operator.attrgetter('name')) + installed.sort(key=operator.attrgetter("name")) items = [] for result in installed: item = result.name @@ -429,16 +487,19 @@ class InstallCommand(RequirementCommand): resolver_variant=self.determine_resolver_variant(options), ) - installed_desc = ' '.join(items) + installed_desc = " ".join(items) if installed_desc: write_output( - 'Successfully installed %s', installed_desc, + "Successfully installed %s", + installed_desc, ) except OSError as error: - show_traceback = (self.verbosity >= 1) + show_traceback = self.verbosity >= 1 message = create_os_error_message( - error, show_traceback, options.use_user_site, + error, + show_traceback, + options.use_user_site, ) logger.error(message, exc_info=show_traceback) # noqa @@ -449,12 +510,13 @@ class InstallCommand(RequirementCommand): self._handle_target_dir( options.target_dir, target_temp_dir, options.upgrade ) - - warn_if_run_as_root() + if options.root_user_action == "warn": + warn_if_run_as_root() return SUCCESS - def _handle_target_dir(self, target_dir, target_temp_dir, upgrade): - # type: (str, TempDirectory, bool) -> None + def _handle_target_dir( + self, target_dir: str, target_temp_dir: TempDirectory, upgrade: bool + ) -> None: ensure_dir(target_dir) # Checking both purelib and platlib directories for installed @@ -463,7 +525,7 @@ class InstallCommand(RequirementCommand): # Checking both purelib and platlib directories for installed # packages to be moved to target directory - scheme = get_scheme('', home=target_temp_dir.path) + scheme = get_scheme("", home=target_temp_dir.path) purelib_dir = scheme.purelib platlib_dir = scheme.platlib data_dir = scheme.data @@ -485,18 +547,18 @@ class InstallCommand(RequirementCommand): if os.path.exists(target_item_dir): if not upgrade: logger.warning( - 'Target directory %s already exists. Specify ' - '--upgrade to force replacement.', - target_item_dir + "Target directory %s already exists. Specify " + "--upgrade to force replacement.", + target_item_dir, ) continue if os.path.islink(target_item_dir): logger.warning( - 'Target directory %s already exists and is ' - 'a link. pip will not automatically replace ' - 'links, please remove if replacement is ' - 'desired.', - target_item_dir + "Target directory %s already exists and is " + "a link. 
pip will not automatically replace " + "links, please remove if replacement is " + "desired.", + target_item_dir, ) continue if os.path.isdir(target_item_dir): @@ -504,13 +566,11 @@ class InstallCommand(RequirementCommand): else: os.remove(target_item_dir) - shutil.move( - os.path.join(lib_dir, item), - target_item_dir - ) + shutil.move(os.path.join(lib_dir, item), target_item_dir) - def _determine_conflicts(self, to_install): - # type: (List[InstallRequirement]) -> Optional[ConflictDetails] + def _determine_conflicts( + self, to_install: List[InstallRequirement] + ) -> Optional[ConflictDetails]: try: return check_install_conflicts(to_install) except Exception: @@ -520,13 +580,14 @@ class InstallCommand(RequirementCommand): ) return None - def _warn_about_conflicts(self, conflict_details, resolver_variant): - # type: (ConflictDetails, str) -> None + def _warn_about_conflicts( + self, conflict_details: ConflictDetails, resolver_variant: str + ) -> None: package_set, (missing, conflicting) = conflict_details if not missing and not conflicting: return - parts = [] # type: List[str] + parts: List[str] = [] if resolver_variant == "legacy": parts.append( "pip's legacy dependency resolver does not consider dependency " @@ -567,7 +628,7 @@ class InstallCommand(RequirementCommand): requirement=req, dep_name=dep_name, dep_version=dep_version, - you=("you" if resolver_variant == "2020-resolver" else "you'll") + you=("you" if resolver_variant == "2020-resolver" else "you'll"), ) parts.append(message) @@ -575,15 +636,14 @@ class InstallCommand(RequirementCommand): def get_lib_location_guesses( - user=False, # type: bool - home=None, # type: Optional[str] - root=None, # type: Optional[str] - isolated=False, # type: bool - prefix=None # type: Optional[str] -): - # type:(...) -> List[str] + user: bool = False, + home: Optional[str] = None, + root: Optional[str] = None, + isolated: bool = False, + prefix: Optional[str] = None, +) -> List[str]: scheme = get_scheme( - '', + "", user=user, home=home, root=root, @@ -593,22 +653,20 @@ def get_lib_location_guesses( return [scheme.purelib, scheme.platlib] -def site_packages_writable(root, isolated): - # type: (Optional[str], bool) -> bool +def site_packages_writable(root: Optional[str], isolated: bool) -> bool: return all( - test_writable_dir(d) for d in set( - get_lib_location_guesses(root=root, isolated=isolated)) + test_writable_dir(d) + for d in set(get_lib_location_guesses(root=root, isolated=isolated)) ) def decide_user_install( - use_user_site, # type: Optional[bool] - prefix_path=None, # type: Optional[str] - target_dir=None, # type: Optional[str] - root_path=None, # type: Optional[str] - isolated_mode=False, # type: bool -): - # type: (...) -> bool + use_user_site: Optional[bool], + prefix_path: Optional[str] = None, + target_dir: Optional[str] = None, + root_path: Optional[str] = None, + isolated_mode: bool = False, +) -> bool: """Determine whether to do a user install based on the input options. If use_user_site is False, no additional checks are done. 
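The docstring above opens install.py's decide_user_install, whose branching is scattered across the next few hunks. As a reading aid, here is a condensed sketch of the decision order (an approximation, not pip's verbatim control flow; site_packages_writable is the helper defined later in this same file's diff, stubbed here only so the sketch runs):

    from typing import Optional


    def site_packages_writable(root: Optional[str], isolated: bool) -> bool:
        # Stand-in for the probe defined in this file's diff, which tests
        # that every candidate lib directory is writable by the current user.
        return True


    def decide_user_install_sketch(
        use_user_site: Optional[bool],
        prefix_path: Optional[str] = None,
        target_dir: Optional[str] = None,
        root_path: Optional[str] = None,
        isolated_mode: bool = False,
    ) -> bool:
        if use_user_site is False:  # explicit --no-user
            return False
        if use_user_site is True:   # explicit --user; the real function also
            return True             # rejects invalid option combinations first
        # use_user_site is None: infer. Location-changing options imply a
        # normal, non-user install.
        if prefix_path or target_dir or root_path:
            return False
        # Otherwise fall back to a user install only when the normal
        # site-packages is not writeable, matching the two log messages
        # visible in the hunk that follows.
        if site_packages_writable(root=root_path, isolated=isolated_mode):
            return False
        return True

The real function additionally consults virtualenv state and site.ENABLE_USER_SITE before probing writability; the sketch keeps only the order of checks the surrounding hunks make visible.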
@@ -656,55 +714,16 @@ def decide_user_install( logger.debug("Non-user install because site-packages writeable") return False - logger.info("Defaulting to user installation because normal site-packages " - "is not writeable") - return True - - -def reject_location_related_install_options(requirements, options): - # type: (List[InstallRequirement], Optional[List[str]]) -> None - """If any location-changing --install-option arguments were passed for - requirements or on the command-line, then show a deprecation warning. - """ - def format_options(option_names): - # type: (Iterable[str]) -> List[str] - return ["--{}".format(name.replace("_", "-")) for name in option_names] - - offenders = [] - - for requirement in requirements: - install_options = requirement.install_options - location_options = parse_distutils_args(install_options) - if location_options: - offenders.append( - "{!r} from {}".format( - format_options(location_options.keys()), requirement - ) - ) - - if options: - location_options = parse_distutils_args(options) - if location_options: - offenders.append( - "{!r} from command line".format( - format_options(location_options.keys()) - ) - ) - - if not offenders: - return - - raise CommandError( - "Location-changing options found in --install-option: {}." - " This is unsupported, use pip-level options like --user," - " --prefix, --root, and --target instead.".format( - "; ".join(offenders) - ) + logger.info( + "Defaulting to user installation because normal site-packages " + "is not writeable" ) + return True -def create_os_error_message(error, show_traceback, using_user_site): - # type: (OSError, bool, bool) -> str +def create_os_error_message( + error: OSError, show_traceback: bool, using_user_site: bool +) -> str: """Format an error message for an OSError It may occur anytime during the execution of the install command. @@ -729,12 +748,31 @@ def create_os_error_message(error, show_traceback, using_user_site): permissions_part = "Check the permissions" if not running_under_virtualenv() and not using_user_site: - parts.extend([ - user_option_part, " or ", - permissions_part.lower(), - ]) + parts.extend( + [ + user_option_part, + " or ", + permissions_part.lower(), + ] + ) else: parts.append(permissions_part) parts.append(".\n") + # Suggest the user to enable Long Paths if path length is + # more than 260 + if ( + WINDOWS + and error.errno == errno.ENOENT + and error.filename + and len(error.filename) > 260 + ): + parts.append( + "HINT: This error might have occurred since " + "this system does not have Windows Long Path " + "support enabled. 
You can find information on " + "how to enable this at " + "https://pip.pypa.io/warnings/enable-long-paths\n" + ) + return "".join(parts).strip() + "\n" diff --git a/env/Lib/site-packages/pip/_internal/commands/list.py b/env/Lib/site-packages/pip/_internal/commands/list.py index dcf9432638ad95774a13121fe3d3c40ece978da5..ac10353194f5f17b042c2076b7397b0c12bfe588 100644 --- a/env/Lib/site-packages/pip/_internal/commands/list.py +++ b/env/Lib/site-packages/pip/_internal/commands/list.py @@ -1,9 +1,9 @@ import json import logging from optparse import Values -from typing import Iterator, List, Set, Tuple +from typing import TYPE_CHECKING, Generator, List, Optional, Sequence, Tuple, cast -from pip._vendor.pkg_resources import Distribution +from pip._vendor.packaging.utils import canonicalize_name from pip._internal.cli import cmdoptions from pip._internal.cli.req_command import IndexGroupCommand @@ -11,17 +11,27 @@ from pip._internal.cli.status_codes import SUCCESS from pip._internal.exceptions import CommandError from pip._internal.index.collector import LinkCollector from pip._internal.index.package_finder import PackageFinder +from pip._internal.metadata import BaseDistribution, get_environment from pip._internal.models.selection_prefs import SelectionPreferences from pip._internal.network.session import PipSession from pip._internal.utils.compat import stdlib_pkgs -from pip._internal.utils.misc import ( - dist_is_editable, - get_installed_distributions, - tabulate, - write_output, -) -from pip._internal.utils.packaging import get_installer -from pip._internal.utils.parallel import map_multithread +from pip._internal.utils.misc import tabulate, write_output + +if TYPE_CHECKING: + from pip._internal.metadata.base import DistributionVersion + + class _DistWithLatestInfo(BaseDistribution): + """Give the distribution object a couple of extra fields. + + These will be populated during ``get_outdated()``. This is dirty but + makes the rest of the code much cleaner. + """ + + latest_version: DistributionVersion + latest_filetype: str + + _ProcessedDists = Sequence[_DistWithLatestInfo] + logger = logging.getLogger(__name__) @@ -37,86 +47,97 @@ class ListCommand(IndexGroupCommand): usage = """ %prog [options]""" - def add_options(self): - # type: () -> None + def add_options(self) -> None: self.cmd_opts.add_option( - '-o', '--outdated', - action='store_true', + "-o", + "--outdated", + action="store_true", default=False, - help='List outdated packages') + help="List outdated packages", + ) self.cmd_opts.add_option( - '-u', '--uptodate', - action='store_true', + "-u", + "--uptodate", + action="store_true", default=False, - help='List uptodate packages') + help="List uptodate packages", + ) self.cmd_opts.add_option( - '-e', '--editable', - action='store_true', + "-e", + "--editable", + action="store_true", default=False, - help='List editable projects.') + help="List editable projects.", + ) self.cmd_opts.add_option( - '-l', '--local', - action='store_true', + "-l", + "--local", + action="store_true", default=False, - help=('If in a virtualenv that has global access, do not list ' - 'globally-installed packages.'), + help=( + "If in a virtualenv that has global access, do not list " + "globally-installed packages." 
+ ), ) self.cmd_opts.add_option( - '--user', - dest='user', - action='store_true', + "--user", + dest="user", + action="store_true", default=False, - help='Only output packages installed in user-site.') + help="Only output packages installed in user-site.", + ) self.cmd_opts.add_option(cmdoptions.list_path()) self.cmd_opts.add_option( - '--pre', - action='store_true', + "--pre", + action="store_true", default=False, - help=("Include pre-release and development versions. By default, " - "pip only finds stable versions."), + help=( + "Include pre-release and development versions. By default, " + "pip only finds stable versions." + ), ) self.cmd_opts.add_option( - '--format', - action='store', - dest='list_format', + "--format", + action="store", + dest="list_format", default="columns", - choices=('columns', 'freeze', 'json'), - help="Select the output format among: columns (default), freeze, " - "or json", + choices=("columns", "freeze", "json"), + help=( + "Select the output format among: columns (default), freeze, or json. " + "The 'freeze' format cannot be used with the --outdated option." + ), ) self.cmd_opts.add_option( - '--not-required', - action='store_true', - dest='not_required', - help="List packages that are not dependencies of " - "installed packages.", + "--not-required", + action="store_true", + dest="not_required", + help="List packages that are not dependencies of installed packages.", ) self.cmd_opts.add_option( - '--exclude-editable', - action='store_false', - dest='include_editable', - help='Exclude editable package from output.', + "--exclude-editable", + action="store_false", + dest="include_editable", + help="Exclude editable package from output.", ) self.cmd_opts.add_option( - '--include-editable', - action='store_true', - dest='include_editable', - help='Include editable package from output.', + "--include-editable", + action="store_true", + dest="include_editable", + help="Include editable package from output.", default=True, ) self.cmd_opts.add_option(cmdoptions.list_exclude()) - index_opts = cmdoptions.make_option_group( - cmdoptions.index_group, self.parser - ) + index_opts = cmdoptions.make_option_group(cmdoptions.index_group, self.parser) self.parser.insert_option_group(0, index_opts) self.parser.insert_option_group(0, self.cmd_opts) - def _build_package_finder(self, options, session): - # type: (Values, PipSession) -> PackageFinder + def _build_package_finder( + self, options: Values, session: PipSession + ) -> PackageFinder: """ Create a package finder appropriate to this list command. """ @@ -133,26 +154,31 @@ class ListCommand(IndexGroupCommand): selection_prefs=selection_prefs, ) - def run(self, options, args): - # type: (Values, List[str]) -> int + def run(self, options: Values, args: List[str]) -> int: if options.outdated and options.uptodate: + raise CommandError("Options --outdated and --uptodate cannot be combined.") + + if options.outdated and options.list_format == "freeze": raise CommandError( - "Options --outdated and --uptodate cannot be combined.") + "List format 'freeze' cannot be used with the --outdated option." 
+ ) cmdoptions.check_list_path_option(options) skip = set(stdlib_pkgs) if options.excludes: - skip.update(options.excludes) - - packages = get_installed_distributions( - local_only=options.local, - user_only=options.user, - editables_only=options.editable, - include_editables=options.include_editable, - paths=options.path, - skip=skip, - ) + skip.update(canonicalize_name(n) for n in options.excludes) + + packages: "_ProcessedDists" = [ + cast("_DistWithLatestInfo", d) + for d in get_environment(options.path).iter_installed_distributions( + local_only=options.local, + user_only=options.user, + editables_only=options.editable, + include_editables=options.include_editable, + skip=skip, + ) + ] # get_not_required must be called firstly in order to find and # filter out all dependencies correctly. Otherwise a package @@ -169,46 +195,58 @@ class ListCommand(IndexGroupCommand): self.output_package_listing(packages, options) return SUCCESS - def get_outdated(self, packages, options): - # type: (List[Distribution], Values) -> List[Distribution] + def get_outdated( + self, packages: "_ProcessedDists", options: Values + ) -> "_ProcessedDists": return [ - dist for dist in self.iter_packages_latest_infos(packages, options) - if dist.latest_version > dist.parsed_version + dist + for dist in self.iter_packages_latest_infos(packages, options) + if dist.latest_version > dist.version ] - def get_uptodate(self, packages, options): - # type: (List[Distribution], Values) -> List[Distribution] + def get_uptodate( + self, packages: "_ProcessedDists", options: Values + ) -> "_ProcessedDists": return [ - dist for dist in self.iter_packages_latest_infos(packages, options) - if dist.latest_version == dist.parsed_version + dist + for dist in self.iter_packages_latest_infos(packages, options) + if dist.latest_version == dist.version ] - def get_not_required(self, packages, options): - # type: (List[Distribution], Values) -> List[Distribution] - dep_keys = set() # type: Set[Distribution] - for dist in packages: - dep_keys.update(requirement.key for requirement in dist.requires()) + def get_not_required( + self, packages: "_ProcessedDists", options: Values + ) -> "_ProcessedDists": + dep_keys = { + canonicalize_name(dep.name) + for dist in packages + for dep in (dist.iter_dependencies() or ()) + } # Create a set to remove duplicate packages, and cast it to a list # to keep the return type consistent with get_outdated and # get_uptodate - return list({pkg for pkg in packages if pkg.key not in dep_keys}) + return list({pkg for pkg in packages if pkg.canonical_name not in dep_keys}) - def iter_packages_latest_infos(self, packages, options): - # type: (List[Distribution], Values) -> Iterator[Distribution] + def iter_packages_latest_infos( + self, packages: "_ProcessedDists", options: Values + ) -> Generator["_DistWithLatestInfo", None, None]: with self._build_session(options) as session: finder = self._build_package_finder(options, session) - def latest_info(dist): - # type: (Distribution) -> Distribution - all_candidates = finder.find_all_candidates(dist.key) + def latest_info( + dist: "_DistWithLatestInfo", + ) -> Optional["_DistWithLatestInfo"]: + all_candidates = finder.find_all_candidates(dist.canonical_name) if not options.pre: # Remove prereleases - all_candidates = [candidate for candidate in all_candidates - if not candidate.version.is_prerelease] + all_candidates = [ + candidate + for candidate in all_candidates + if not candidate.version.is_prerelease + ] evaluator = finder.make_candidate_evaluator( - 
project_name=dist.project_name, + project_name=dist.canonical_name, ) best_candidate = evaluator.sort_best_candidate(all_candidates) if best_candidate is None: @@ -216,39 +254,41 @@ class ListCommand(IndexGroupCommand): remote_version = best_candidate.version if best_candidate.link.is_wheel: - typ = 'wheel' + typ = "wheel" else: - typ = 'sdist' - # This is dirty but makes the rest of the code much cleaner + typ = "sdist" dist.latest_version = remote_version dist.latest_filetype = typ return dist - for dist in map_multithread(latest_info, packages): + for dist in map(latest_info, packages): if dist is not None: yield dist - def output_package_listing(self, packages, options): - # type: (List[Distribution], Values) -> None + def output_package_listing( + self, packages: "_ProcessedDists", options: Values + ) -> None: packages = sorted( packages, - key=lambda dist: dist.project_name.lower(), + key=lambda dist: dist.canonical_name, ) - if options.list_format == 'columns' and packages: + if options.list_format == "columns" and packages: data, header = format_for_columns(packages, options) self.output_package_listing_columns(data, header) - elif options.list_format == 'freeze': + elif options.list_format == "freeze": for dist in packages: if options.verbose >= 1: - write_output("%s==%s (%s)", dist.project_name, - dist.version, dist.location) + write_output( + "%s==%s (%s)", dist.raw_name, dist.version, dist.location + ) else: - write_output("%s==%s", dist.project_name, dist.version) - elif options.list_format == 'json': + write_output("%s==%s", dist.raw_name, dist.version) + elif options.list_format == "json": write_output(format_for_json(packages, options)) - def output_package_listing_columns(self, data, header): - # type: (List[List[str]], List[str]) -> None + def output_package_listing_columns( + self, data: List[List[str]], header: List[str] + ) -> None: # insert the header first: we need to know the size of column names if len(data) > 0: data.insert(0, header) @@ -257,63 +297,72 @@ class ListCommand(IndexGroupCommand): # Create and add a separator. if len(data) > 0: - pkg_strings.insert(1, " ".join(map(lambda x: '-' * x, sizes))) + pkg_strings.insert(1, " ".join(map(lambda x: "-" * x, sizes))) for val in pkg_strings: write_output(val) -def format_for_columns(pkgs, options): - # type: (List[Distribution], Values) -> Tuple[List[List[str]], List[str]] +def format_for_columns( + pkgs: "_ProcessedDists", options: Values +) -> Tuple[List[List[str]], List[str]]: """ Convert the package data into something usable by output_package_listing_columns. """ + header = ["Package", "Version"] + running_outdated = options.outdated - # Adjust the header for the `pip list --outdated` case. 
if running_outdated: - header = ["Package", "Version", "Latest", "Type"] - else: - header = ["Package", "Version"] + header.extend(["Latest", "Type"]) - data = [] - if options.verbose >= 1 or any(dist_is_editable(x) for x in pkgs): + has_editables = any(x.editable for x in pkgs) + if has_editables: + header.append("Editable project location") + + if options.verbose >= 1: header.append("Location") if options.verbose >= 1: header.append("Installer") + data = [] for proj in pkgs: # if we're working on the 'outdated' list, separate out the # latest_version and type - row = [proj.project_name, proj.version] + row = [proj.raw_name, str(proj.version)] if running_outdated: - row.append(proj.latest_version) + row.append(str(proj.latest_version)) row.append(proj.latest_filetype) - if options.verbose >= 1 or dist_is_editable(proj): - row.append(proj.location) + if has_editables: + row.append(proj.editable_project_location or "") + + if options.verbose >= 1: + row.append(proj.location or "") if options.verbose >= 1: - row.append(get_installer(proj)) + row.append(proj.installer) data.append(row) return data, header -def format_for_json(packages, options): - # type: (List[Distribution], Values) -> str +def format_for_json(packages: "_ProcessedDists", options: Values) -> str: data = [] for dist in packages: info = { - 'name': dist.project_name, - 'version': str(dist.version), + "name": dist.raw_name, + "version": str(dist.version), } if options.verbose >= 1: - info['location'] = dist.location - info['installer'] = get_installer(dist) + info["location"] = dist.location or "" + info["installer"] = dist.installer if options.outdated: - info['latest_version'] = str(dist.latest_version) - info['latest_filetype'] = dist.latest_filetype + info["latest_version"] = str(dist.latest_version) + info["latest_filetype"] = dist.latest_filetype + editable_project_location = dist.editable_project_location + if editable_project_location: + info["editable_project_location"] = editable_project_location data.append(info) return json.dumps(data) diff --git a/env/Lib/site-packages/pip/_internal/commands/search.py b/env/Lib/site-packages/pip/_internal/commands/search.py index d66e823471c94154501acb7a31eedeee111d4e3a..03ed925b246dd551ec2ef45095ed6cad00fd2745 100644 --- a/env/Lib/site-packages/pip/_internal/commands/search.py +++ b/env/Lib/site-packages/pip/_internal/commands/search.py @@ -27,6 +27,7 @@ if TYPE_CHECKING: summary: str versions: List[str] + logger = logging.getLogger(__name__) @@ -37,21 +38,21 @@ class SearchCommand(Command, SessionCommandMixin): %prog [options] <query>""" ignore_require_venv = True - def add_options(self): - # type: () -> None + def add_options(self) -> None: self.cmd_opts.add_option( - '-i', '--index', - dest='index', - metavar='URL', + "-i", + "--index", + dest="index", + metavar="URL", default=PyPI.pypi_url, - help='Base URL of Python Package Index (default %default)') + help="Base URL of Python Package Index (default %default)", + ) self.parser.insert_option_group(0, self.cmd_opts) - def run(self, options, args): - # type: (Values, List[str]) -> int + def run(self, options: Values, args: List[str]) -> int: if not args: - raise CommandError('Missing required argument (search query).') + raise CommandError("Missing required argument (search query).") query = args pypi_hits = self.search(query, options) hits = transform_hits(pypi_hits) @@ -65,8 +66,7 @@ class SearchCommand(Command, SessionCommandMixin): return SUCCESS return NO_MATCHES_FOUND - def search(self, query, options): - # type: 
(List[str], Values) -> List[Dict[str, str]] + def search(self, query: List[str], options: Values) -> List[Dict[str, str]]: index_url = options.index session = self.get_default_session(options) @@ -74,7 +74,7 @@ class SearchCommand(Command, SessionCommandMixin): transport = PipXmlrpcTransport(index_url, session) pypi = xmlrpc.client.ServerProxy(index_url, transport) try: - hits = pypi.search({'name': query, 'summary': query}, 'or') + hits = pypi.search({"name": query, "summary": query}, "or") except xmlrpc.client.Fault as fault: message = "XMLRPC request failed [code: {code}]\n{string}".format( code=fault.faultCode, @@ -85,78 +85,90 @@ class SearchCommand(Command, SessionCommandMixin): return hits -def transform_hits(hits): - # type: (List[Dict[str, str]]) -> List[TransformedHit] +def transform_hits(hits: List[Dict[str, str]]) -> List["TransformedHit"]: """ The list from pypi is really a list of versions. We want a list of packages with the list of versions stored inline. This converts the list from pypi into one we can use. """ - packages = OrderedDict() # type: OrderedDict[str, TransformedHit] + packages: Dict[str, "TransformedHit"] = OrderedDict() for hit in hits: - name = hit['name'] - summary = hit['summary'] - version = hit['version'] + name = hit["name"] + summary = hit["summary"] + version = hit["version"] if name not in packages.keys(): packages[name] = { - 'name': name, - 'summary': summary, - 'versions': [version], + "name": name, + "summary": summary, + "versions": [version], } else: - packages[name]['versions'].append(version) + packages[name]["versions"].append(version) # if this is the highest version, replace summary and score - if version == highest_version(packages[name]['versions']): - packages[name]['summary'] = summary + if version == highest_version(packages[name]["versions"]): + packages[name]["summary"] = summary return list(packages.values()) -def print_results(hits, name_column_width=None, terminal_width=None): - # type: (List[TransformedHit], Optional[int], Optional[int]) -> None +def print_dist_installation_info(name: str, latest: str) -> None: + env = get_default_environment() + dist = env.get_distribution(name) + if dist is not None: + with indent_log(): + if dist.version == latest: + write_output("INSTALLED: %s (latest)", dist.version) + else: + write_output("INSTALLED: %s", dist.version) + if parse_version(latest).pre: + write_output( + "LATEST: %s (pre-release; install" + " with `pip install --pre`)", + latest, + ) + else: + write_output("LATEST: %s", latest) + + +def print_results( + hits: List["TransformedHit"], + name_column_width: Optional[int] = None, + terminal_width: Optional[int] = None, +) -> None: if not hits: return if name_column_width is None: - name_column_width = max([ - len(hit['name']) + len(highest_version(hit.get('versions', ['-']))) - for hit in hits - ]) + 4 + name_column_width = ( + max( + [ + len(hit["name"]) + len(highest_version(hit.get("versions", ["-"]))) + for hit in hits + ] + ) + + 4 + ) - env = get_default_environment() for hit in hits: - name = hit['name'] - summary = hit['summary'] or '' - latest = highest_version(hit.get('versions', ['-'])) + name = hit["name"] + summary = hit["summary"] or "" + latest = highest_version(hit.get("versions", ["-"])) if terminal_width is not None: target_width = terminal_width - name_column_width - 5 if target_width > 10: # wrap and indent summary to fit terminal summary_lines = textwrap.wrap(summary, target_width) - summary = ('\n' + ' ' * (name_column_width + 3)).join( - summary_lines) + 
summary = ("\n" + " " * (name_column_width + 3)).join(summary_lines) - name_latest = f'{name} ({latest})' - line = f'{name_latest:{name_column_width}} - {summary}' + name_latest = f"{name} ({latest})" + line = f"{name_latest:{name_column_width}} - {summary}" try: write_output(line) - dist = env.get_distribution(name) - if dist is not None: - with indent_log(): - if dist.version == latest: - write_output('INSTALLED: %s (latest)', dist.version) - else: - write_output('INSTALLED: %s', dist.version) - if parse_version(latest).pre: - write_output('LATEST: %s (pre-release; install' - ' with "pip install --pre")', latest) - else: - write_output('LATEST: %s', latest) + print_dist_installation_info(name, latest) except UnicodeEncodeError: pass -def highest_version(versions): - # type: (List[str]) -> str +def highest_version(versions: List[str]) -> str: return max(versions, key=parse_version) diff --git a/env/Lib/site-packages/pip/_internal/commands/show.py b/env/Lib/site-packages/pip/_internal/commands/show.py index 24e855a80d82944ab9721636e7472481c65b4ccb..3f10701f6b28c72b62c9904fec37b96bdd199dcc 100644 --- a/env/Lib/site-packages/pip/_internal/commands/show.py +++ b/env/Lib/site-packages/pip/_internal/commands/show.py @@ -1,14 +1,12 @@ import logging -import os -from email.parser import FeedParser from optparse import Values -from typing import Dict, Iterator, List +from typing import Generator, Iterable, Iterator, List, NamedTuple, Optional -from pip._vendor import pkg_resources from pip._vendor.packaging.utils import canonicalize_name from pip._internal.cli.base_command import Command from pip._internal.cli.status_codes import ERROR, SUCCESS +from pip._internal.metadata import BaseDistribution, get_default_environment from pip._internal.utils.misc import write_output logger = logging.getLogger(__name__) @@ -25,123 +23,126 @@ class ShowCommand(Command): %prog [options] <package> ...""" ignore_require_venv = True - def add_options(self): - # type: () -> None + def add_options(self) -> None: self.cmd_opts.add_option( - '-f', '--files', - dest='files', - action='store_true', + "-f", + "--files", + dest="files", + action="store_true", default=False, - help='Show the full list of installed files for each package.') + help="Show the full list of installed files for each package.", + ) self.parser.insert_option_group(0, self.cmd_opts) - def run(self, options, args): - # type: (Values, List[str]) -> int + def run(self, options: Values, args: List[str]) -> int: if not args: - logger.warning('ERROR: Please provide a package name or names.') + logger.warning("ERROR: Please provide a package name or names.") return ERROR query = args results = search_packages_info(query) if not print_results( - results, list_files=options.files, verbose=options.verbose): + results, list_files=options.files, verbose=options.verbose + ): return ERROR return SUCCESS -def search_packages_info(query): - # type: (List[str]) -> Iterator[Dict[str, str]] +class _PackageInfo(NamedTuple): + name: str + version: str + location: str + editable_project_location: Optional[str] + requires: List[str] + required_by: List[str] + installer: str + metadata_version: str + classifiers: List[str] + summary: str + homepage: str + project_urls: List[str] + author: str + author_email: str + license: str + entry_points: List[str] + files: Optional[List[str]] + + +def search_packages_info(query: List[str]) -> Generator[_PackageInfo, None, None]: """ Gather details from installed distributions. 
Print distribution name, version, location, and installed files. Installed files requires a pip generated 'installed-files.txt' in the distributions '.egg-info' directory. """ - installed = {} - for p in pkg_resources.working_set: - installed[canonicalize_name(p.project_name)] = p + env = get_default_environment() + installed = {dist.canonical_name: dist for dist in env.iter_all_distributions()} query_names = [canonicalize_name(name) for name in query] missing = sorted( [name for name, pkg in zip(query, query_names) if pkg not in installed] ) if missing: - logger.warning('Package(s) not found: %s', ', '.join(missing)) - - def get_requiring_packages(package_name): - # type: (str) -> List[str] - canonical_name = canonicalize_name(package_name) - return [ - pkg.project_name for pkg in pkg_resources.working_set - if canonical_name in - [canonicalize_name(required.name) for required in - pkg.requires()] - ] - - for dist in [installed[pkg] for pkg in query_names if pkg in installed]: - package = { - 'name': dist.project_name, - 'version': dist.version, - 'location': dist.location, - 'requires': [dep.project_name for dep in dist.requires()], - 'required_by': get_requiring_packages(dist.project_name) - } - file_list = None - metadata = '' - if isinstance(dist, pkg_resources.DistInfoDistribution): - # RECORDs should be part of .dist-info metadatas - if dist.has_metadata('RECORD'): - lines = dist.get_metadata_lines('RECORD') - paths = [line.split(',')[0] for line in lines] - paths = [os.path.join(dist.location, p) for p in paths] - file_list = [os.path.relpath(p, dist.location) for p in paths] - - if dist.has_metadata('METADATA'): - metadata = dist.get_metadata('METADATA') + logger.warning("Package(s) not found: %s", ", ".join(missing)) + + def _get_requiring_packages(current_dist: BaseDistribution) -> Iterator[str]: + return ( + dist.metadata["Name"] or "UNKNOWN" + for dist in installed.values() + if current_dist.canonical_name + in {canonicalize_name(d.name) for d in dist.iter_dependencies()} + ) + + for query_name in query_names: + try: + dist = installed[query_name] + except KeyError: + continue + + requires = sorted((req.name for req in dist.iter_dependencies()), key=str.lower) + required_by = sorted(_get_requiring_packages(dist), key=str.lower) + + try: + entry_points_text = dist.read_text("entry_points.txt") + entry_points = entry_points_text.splitlines(keepends=False) + except FileNotFoundError: + entry_points = [] + + files_iter = dist.iter_declared_entries() + if files_iter is None: + files: Optional[List[str]] = None else: - # Otherwise use pip's log for .egg-info's - if dist.has_metadata('installed-files.txt'): - paths = dist.get_metadata_lines('installed-files.txt') - paths = [os.path.join(dist.egg_info, p) for p in paths] - file_list = [os.path.relpath(p, dist.location) for p in paths] - - if dist.has_metadata('PKG-INFO'): - metadata = dist.get_metadata('PKG-INFO') - - if dist.has_metadata('entry_points.txt'): - entry_points = dist.get_metadata_lines('entry_points.txt') - package['entry_points'] = entry_points - - if dist.has_metadata('INSTALLER'): - for line in dist.get_metadata_lines('INSTALLER'): - if line.strip(): - package['installer'] = line.strip() - break - - # @todo: Should pkg_resources.Distribution have a - # `get_pkg_info` method? 
- feed_parser = FeedParser() - feed_parser.feed(metadata) - pkg_info_dict = feed_parser.close() - for key in ('metadata-version', 'summary', - 'home-page', 'author', 'author-email', 'license'): - package[key] = pkg_info_dict.get(key) - - # It looks like FeedParser cannot deal with repeated headers - classifiers = [] - for line in metadata.splitlines(): - if line.startswith('Classifier: '): - classifiers.append(line[len('Classifier: '):]) - package['classifiers'] = classifiers - - if file_list: - package['files'] = sorted(file_list) - yield package - - -def print_results(distributions, list_files=False, verbose=False): - # type: (Iterator[Dict[str, str]], bool, bool) -> bool + files = sorted(files_iter) + + metadata = dist.metadata + + yield _PackageInfo( + name=dist.raw_name, + version=str(dist.version), + location=dist.location or "", + editable_project_location=dist.editable_project_location, + requires=requires, + required_by=required_by, + installer=dist.installer, + metadata_version=dist.metadata_version or "", + classifiers=metadata.get_all("Classifier", []), + summary=metadata.get("Summary", ""), + homepage=metadata.get("Home-page", ""), + project_urls=metadata.get_all("Project-URL", []), + author=metadata.get("Author", ""), + author_email=metadata.get("Author-email", ""), + license=metadata.get("License", ""), + entry_points=entry_points, + files=files, + ) + + +def print_results( + distributions: Iterable[_PackageInfo], + list_files: bool, + verbose: bool, +) -> bool: """ Print the information from installed distributions found. """ @@ -151,31 +152,38 @@ def print_results(distributions, list_files=False, verbose=False): if i > 0: write_output("---") - write_output("Name: %s", dist.get('name', '')) - write_output("Version: %s", dist.get('version', '')) - write_output("Summary: %s", dist.get('summary', '')) - write_output("Home-page: %s", dist.get('home-page', '')) - write_output("Author: %s", dist.get('author', '')) - write_output("Author-email: %s", dist.get('author-email', '')) - write_output("License: %s", dist.get('license', '')) - write_output("Location: %s", dist.get('location', '')) - write_output("Requires: %s", ', '.join(dist.get('requires', []))) - write_output("Required-by: %s", ', '.join(dist.get('required_by', []))) + write_output("Name: %s", dist.name) + write_output("Version: %s", dist.version) + write_output("Summary: %s", dist.summary) + write_output("Home-page: %s", dist.homepage) + write_output("Author: %s", dist.author) + write_output("Author-email: %s", dist.author_email) + write_output("License: %s", dist.license) + write_output("Location: %s", dist.location) + if dist.editable_project_location is not None: + write_output( + "Editable project location: %s", dist.editable_project_location + ) + write_output("Requires: %s", ", ".join(dist.requires)) + write_output("Required-by: %s", ", ".join(dist.required_by)) if verbose: - write_output("Metadata-Version: %s", - dist.get('metadata-version', '')) - write_output("Installer: %s", dist.get('installer', '')) + write_output("Metadata-Version: %s", dist.metadata_version) + write_output("Installer: %s", dist.installer) write_output("Classifiers:") - for classifier in dist.get('classifiers', []): + for classifier in dist.classifiers: write_output(" %s", classifier) write_output("Entry-points:") - for entry in dist.get('entry_points', []): + for entry in dist.entry_points: write_output(" %s", entry.strip()) + write_output("Project-URLs:") + for project_url in dist.project_urls: + write_output(" %s", project_url) if 
list_files: write_output("Files:") - for line in dist.get('files', []): - write_output(" %s", line.strip()) - if "files" not in dist: - write_output("Cannot locate installed-files.txt") + if dist.files is None: + write_output("Cannot locate RECORD or installed-files.txt") + else: + for line in dist.files: + write_output(" %s", line.strip()) return results_printed diff --git a/env/Lib/site-packages/pip/_internal/commands/uninstall.py b/env/Lib/site-packages/pip/_internal/commands/uninstall.py index 9a3c9f8815cec75684e6de1057c1a4c442e416ca..f198fc313ff57929d95d36216e3e6ecec3877673 100644 --- a/env/Lib/site-packages/pip/_internal/commands/uninstall.py +++ b/env/Lib/site-packages/pip/_internal/commands/uninstall.py @@ -1,8 +1,10 @@ +import logging from optparse import Values from typing import List from pip._vendor.packaging.utils import canonicalize_name +from pip._internal.cli import cmdoptions from pip._internal.cli.base_command import Command from pip._internal.cli.req_command import SessionCommandMixin, warn_if_run_as_root from pip._internal.cli.status_codes import SUCCESS @@ -12,7 +14,12 @@ from pip._internal.req.constructors import ( install_req_from_line, install_req_from_parsed_requirement, ) -from pip._internal.utils.misc import protect_pip_from_modification_on_windows +from pip._internal.utils.misc import ( + check_externally_managed, + protect_pip_from_modification_on_windows, +) + +logger = logging.getLogger(__name__) class UninstallCommand(Command, SessionCommandMixin): @@ -30,63 +37,77 @@ class UninstallCommand(Command, SessionCommandMixin): %prog [options] <package> ... %prog [options] -r <requirements file> ...""" - def add_options(self): - # type: () -> None + def add_options(self) -> None: self.cmd_opts.add_option( - '-r', '--requirement', - dest='requirements', - action='append', + "-r", + "--requirement", + dest="requirements", + action="append", default=[], - metavar='file', - help='Uninstall all the packages listed in the given requirements ' - 'file. This option can be used multiple times.', + metavar="file", + help=( + "Uninstall all the packages listed in the given requirements " + "file. This option can be used multiple times." 
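On the Files output above: iter_declared_entries() prefers RECORD (dist-info) and falls back to installed-files.txt (egg-info), hence the updated "Cannot locate RECORD or installed-files.txt" message. The stdlib exposes the same declared entries; a small sketch (the package name is illustrative):

from importlib.metadata import distribution

dist = distribution("pip")
# dist.files is parsed from RECORD when available; it is None when neither
# RECORD nor installed-files.txt can be located, matching the message above.
for path in (dist.files or [])[:5]:
    print(path)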
+ ), ) self.cmd_opts.add_option( - '-y', '--yes', - dest='yes', - action='store_true', - help="Don't ask for confirmation of uninstall deletions.") - + "-y", + "--yes", + dest="yes", + action="store_true", + help="Don't ask for confirmation of uninstall deletions.", + ) + self.cmd_opts.add_option(cmdoptions.root_user_action()) + self.cmd_opts.add_option(cmdoptions.override_externally_managed()) self.parser.insert_option_group(0, self.cmd_opts) - def run(self, options, args): - # type: (Values, List[str]) -> int + def run(self, options: Values, args: List[str]) -> int: session = self.get_default_session(options) reqs_to_uninstall = {} for name in args: req = install_req_from_line( - name, isolated=options.isolated_mode, + name, + isolated=options.isolated_mode, ) if req.name: reqs_to_uninstall[canonicalize_name(req.name)] = req + else: + logger.warning( + "Invalid requirement: %r ignored -" + " the uninstall command expects named" + " requirements.", + name, + ) for filename in options.requirements: for parsed_req in parse_requirements( - filename, - options=options, - session=session): + filename, options=options, session=session + ): req = install_req_from_parsed_requirement( - parsed_req, - isolated=options.isolated_mode + parsed_req, isolated=options.isolated_mode ) if req.name: reqs_to_uninstall[canonicalize_name(req.name)] = req if not reqs_to_uninstall: raise InstallationError( - f'You must give at least one requirement to {self.name} (see ' + f"You must give at least one requirement to {self.name} (see " f'"pip help {self.name}")' ) + if not options.override_externally_managed: + check_externally_managed() + protect_pip_from_modification_on_windows( modifying_pip="pip" in reqs_to_uninstall ) for req in reqs_to_uninstall.values(): uninstall_pathset = req.uninstall( - auto_confirm=options.yes, verbose=self.verbosity > 0, + auto_confirm=options.yes, + verbose=self.verbosity > 0, ) if uninstall_pathset: uninstall_pathset.commit() - - warn_if_run_as_root() + if options.root_user_action == "warn": + warn_if_run_as_root() return SUCCESS diff --git a/env/Lib/site-packages/pip/_internal/commands/wheel.py b/env/Lib/site-packages/pip/_internal/commands/wheel.py index ff47dbac51cfabe5a768ea2527e8bca741602a63..ed578aa2500d8917d5d3ed1249526b48ad7ee996 100644 --- a/env/Lib/site-packages/pip/_internal/commands/wheel.py +++ b/env/Lib/site-packages/pip/_internal/commands/wheel.py @@ -9,8 +9,11 @@ from pip._internal.cli import cmdoptions from pip._internal.cli.req_command import RequirementCommand, with_cleanup from pip._internal.cli.status_codes import SUCCESS from pip._internal.exceptions import CommandError -from pip._internal.req.req_install import InstallRequirement -from pip._internal.req.req_tracker import get_requirement_tracker +from pip._internal.operations.build.build_tracker import get_build_tracker +from pip._internal.req.req_install import ( + InstallRequirement, + check_legacy_setup_py_options, +) from pip._internal.utils.misc import ensure_dir, normalize_path from pip._internal.utils.temp_dir import TempDirectory from pip._internal.wheel_builder import build, should_build_for_wheel_command @@ -26,10 +29,8 @@ class WheelCommand(RequirementCommand): recompiling your software during every install. For more details, see the wheel docs: https://wheel.readthedocs.io/en/latest/ - Requirements: setuptools>=0.8, and wheel. - - 'pip wheel' uses the bdist_wheel setuptools extension from the wheel - package to build individual wheels. 
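The uninstall hunk above wires in check_externally_managed() behind --override-externally-managed. Per PEP 668, the marker is a file named EXTERNALLY-MANAGED in the standard-library directory; a simplified stand-in for pip's real helper (which lives in pip._internal.utils.misc) might look like this sketch:

import sys
import sysconfig
from pathlib import Path

def is_externally_managed() -> bool:
    # Virtual environments are never externally managed; otherwise the
    # distributor's marker file next to the stdlib decides (PEP 668).
    if sys.prefix != sys.base_prefix:
        return False
    marker = Path(sysconfig.get_path("stdlib")) / "EXTERNALLY-MANAGED"
    return marker.is_file()

print(is_externally_managed())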
+ 'pip wheel' uses the build system interface as described here: + https://pip.pypa.io/en/stable/reference/build-system/ """ @@ -40,16 +41,17 @@ class WheelCommand(RequirementCommand): %prog [options] [-e] <local project path> ... %prog [options] <archive url/path> ...""" - def add_options(self): - # type: () -> None - + def add_options(self) -> None: self.cmd_opts.add_option( - '-w', '--wheel-dir', - dest='wheel_dir', - metavar='dir', + "-w", + "--wheel-dir", + dest="wheel_dir", + metavar="dir", default=os.curdir, - help=("Build wheels into <dir>, where the default is the " - "current working directory."), + help=( + "Build wheels into <dir>, where the default is the " + "current working directory." + ), ) self.cmd_opts.add_option(cmdoptions.no_binary()) self.cmd_opts.add_option(cmdoptions.only_binary()) @@ -57,32 +59,35 @@ class WheelCommand(RequirementCommand): self.cmd_opts.add_option(cmdoptions.no_build_isolation()) self.cmd_opts.add_option(cmdoptions.use_pep517()) self.cmd_opts.add_option(cmdoptions.no_use_pep517()) + self.cmd_opts.add_option(cmdoptions.check_build_deps()) self.cmd_opts.add_option(cmdoptions.constraints()) self.cmd_opts.add_option(cmdoptions.editable()) self.cmd_opts.add_option(cmdoptions.requirements()) self.cmd_opts.add_option(cmdoptions.src()) self.cmd_opts.add_option(cmdoptions.ignore_requires_python()) self.cmd_opts.add_option(cmdoptions.no_deps()) - self.cmd_opts.add_option(cmdoptions.build_dir()) self.cmd_opts.add_option(cmdoptions.progress_bar()) self.cmd_opts.add_option( - '--no-verify', - dest='no_verify', - action='store_true', + "--no-verify", + dest="no_verify", + action="store_true", default=False, help="Don't verify if built wheel is valid.", ) + self.cmd_opts.add_option(cmdoptions.config_settings()) self.cmd_opts.add_option(cmdoptions.build_options()) self.cmd_opts.add_option(cmdoptions.global_options()) self.cmd_opts.add_option( - '--pre', - action='store_true', + "--pre", + action="store_true", default=False, - help=("Include pre-release and development versions. By default, " - "pip only finds stable versions."), + help=( + "Include pre-release and development versions. By default, " + "pip only finds stable versions." 
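The rewritten 'pip wheel' docstring above points at the standardized build-system interface instead of setuptools' bdist_wheel. Outside pip, the standalone 'build' frontend drives the same PEP 517 hooks; a minimal sketch, assuming `python -m pip install build` and a current directory containing pyproject.toml:

from build import ProjectBuilder

builder = ProjectBuilder(".")  # project root with pyproject.toml
# Invokes the backend's build_wheel hook and returns the path of the result.
wheel_path = builder.build("wheel", "dist/")
print("built", wheel_path)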
+ ), ) self.cmd_opts.add_option(cmdoptions.require_hashes()) @@ -96,19 +101,15 @@ class WheelCommand(RequirementCommand): self.parser.insert_option_group(0, self.cmd_opts) @with_cleanup - def run(self, options, args): - # type: (Values, List[str]) -> int - cmdoptions.check_install_build_global(options) - + def run(self, options: Values, args: List[str]) -> int: session = self.get_default_session(options) finder = self._build_package_finder(options, session) - wheel_cache = WheelCache(options.cache_dir, options.format_control) options.wheel_dir = normalize_path(options.wheel_dir) ensure_dir(options.wheel_dir) - req_tracker = self.enter_context(get_requirement_tracker()) + build_tracker = self.enter_context(get_build_tracker()) directory = TempDirectory( delete=not options.no_clean, @@ -117,15 +118,19 @@ class WheelCommand(RequirementCommand): ) reqs = self.get_requirements(args, options, finder, session) + check_legacy_setup_py_options(options, reqs) + + wheel_cache = WheelCache(options.cache_dir) preparer = self.make_requirement_preparer( temp_build_dir=directory, options=options, - req_tracker=req_tracker, + build_tracker=build_tracker, session=session, finder=finder, download_dir=options.wheel_dir, use_user_site=False, + verbosity=self.verbosity, ) resolver = self.make_resolver( @@ -139,17 +144,18 @@ class WheelCommand(RequirementCommand): self.trace_basic_info(finder) - requirement_set = resolver.resolve( - reqs, check_supported_wheels=True - ) + requirement_set = resolver.resolve(reqs, check_supported_wheels=True) - reqs_to_build = [] # type: List[InstallRequirement] + reqs_to_build: List[InstallRequirement] = [] for req in requirement_set.requirements.values(): if req.is_wheel: preparer.save_linked_requirement(req) elif should_build_for_wheel_command(req): reqs_to_build.append(req) + preparer.prepare_linked_requirements_more(requirement_set.requirements.values()) + requirement_set.warn_legacy_versions_and_specifiers() + # build wheels build_successes, build_failures = build( reqs_to_build, @@ -167,12 +173,11 @@ class WheelCommand(RequirementCommand): except OSError as e: logger.warning( "Building wheel for %s failed: %s", - req.name, e, + req.name, + e, ) build_failures.append(req) if len(build_failures) != 0: - raise CommandError( - "Failed to build one or more wheels" - ) + raise CommandError("Failed to build one or more wheels") return SUCCESS diff --git a/env/Lib/site-packages/pip/_internal/configuration.py b/env/Lib/site-packages/pip/_internal/configuration.py index a4698ec1ddacd998954ebb80d8069984acd46fe7..96f824955bf098d86e54cd8bce3bf0015f976ec2 100644 --- a/env/Lib/site-packages/pip/_internal/configuration.py +++ b/env/Lib/site-packages/pip/_internal/configuration.py @@ -13,7 +13,6 @@ Some terminology: import configparser import locale -import logging import os import sys from typing import Any, Dict, Iterable, List, NewType, Optional, Tuple @@ -24,41 +23,39 @@ from pip._internal.exceptions import ( ) from pip._internal.utils import appdirs from pip._internal.utils.compat import WINDOWS +from pip._internal.utils.logging import getLogger from pip._internal.utils.misc import ensure_dir, enum RawConfigParser = configparser.RawConfigParser # Shorthand Kind = NewType("Kind", str) -CONFIG_BASENAME = 'pip.ini' if WINDOWS else 'pip.conf' +CONFIG_BASENAME = "pip.ini" if WINDOWS else "pip.conf" ENV_NAMES_IGNORED = "version", "help" # The kinds of configurations there are. 
kinds = enum( - USER="user", # User Specific - GLOBAL="global", # System Wide - SITE="site", # [Virtual] Environment Specific - ENV="env", # from PIP_CONFIG_FILE + USER="user", # User Specific + GLOBAL="global", # System Wide + SITE="site", # [Virtual] Environment Specific + ENV="env", # from PIP_CONFIG_FILE ENV_VAR="env-var", # from Environment Variables ) OVERRIDE_ORDER = kinds.GLOBAL, kinds.USER, kinds.SITE, kinds.ENV, kinds.ENV_VAR VALID_LOAD_ONLY = kinds.USER, kinds.GLOBAL, kinds.SITE -logger = logging.getLogger(__name__) +logger = getLogger(__name__) # NOTE: Maybe use the optionx attribute to normalize keynames. -def _normalize_name(name): - # type: (str) -> str - """Make a name consistent regardless of source (environment or file) - """ - name = name.lower().replace('_', '-') - if name.startswith('--'): +def _normalize_name(name: str) -> str: + """Make a name consistent regardless of source (environment or file)""" + name = name.lower().replace("_", "-") + if name.startswith("--"): name = name[2:] # only prefer long opts return name -def _disassemble_key(name): - # type: (str) -> List[str] +def _disassemble_key(name: str) -> List[str]: if "." not in name: error_message = ( "Key does not contain dot separated section and key. " @@ -68,22 +65,18 @@ def _disassemble_key(name): return name.split(".", 1) -def get_configuration_files(): - # type: () -> Dict[Kind, List[str]] +def get_configuration_files() -> Dict[Kind, List[str]]: global_config_files = [ - os.path.join(path, CONFIG_BASENAME) - for path in appdirs.site_config_dirs('pip') + os.path.join(path, CONFIG_BASENAME) for path in appdirs.site_config_dirs("pip") ] site_config_file = os.path.join(sys.prefix, CONFIG_BASENAME) legacy_config_file = os.path.join( - os.path.expanduser('~'), - 'pip' if WINDOWS else '.pip', + os.path.expanduser("~"), + "pip" if WINDOWS else ".pip", CONFIG_BASENAME, ) - new_config_file = os.path.join( - appdirs.user_config_dir("pip"), CONFIG_BASENAME - ) + new_config_file = os.path.join(appdirs.user_config_dir("pip"), CONFIG_BASENAME) return { kinds.GLOBAL: global_config_files, kinds.SITE: [site_config_file], @@ -105,8 +98,7 @@ class Configuration: and the data stored is also nice. 
""" - def __init__(self, isolated, load_only=None): - # type: (bool, Optional[Kind]) -> None + def __init__(self, isolated: bool, load_only: Optional[Kind] = None) -> None: super().__init__() if load_only is not None and load_only not in VALID_LOAD_ONLY: @@ -119,54 +111,50 @@ class Configuration: self.load_only = load_only # Because we keep track of where we got the data from - self._parsers = { + self._parsers: Dict[Kind, List[Tuple[str, RawConfigParser]]] = { variant: [] for variant in OVERRIDE_ORDER - } # type: Dict[Kind, List[Tuple[str, RawConfigParser]]] - self._config = { + } + self._config: Dict[Kind, Dict[str, Any]] = { variant: {} for variant in OVERRIDE_ORDER - } # type: Dict[Kind, Dict[str, Any]] - self._modified_parsers = [] # type: List[Tuple[str, RawConfigParser]] + } + self._modified_parsers: List[Tuple[str, RawConfigParser]] = [] - def load(self): - # type: () -> None - """Loads configuration from configuration files and environment - """ + def load(self) -> None: + """Loads configuration from configuration files and environment""" self._load_config_files() if not self.isolated: self._load_environment_vars() - def get_file_to_edit(self): - # type: () -> Optional[str] - """Returns the file with highest priority in configuration - """ - assert self.load_only is not None, \ - "Need to be specified a file to be editing" + def get_file_to_edit(self) -> Optional[str]: + """Returns the file with highest priority in configuration""" + assert self.load_only is not None, "Need to be specified a file to be editing" try: return self._get_parser_to_modify()[0] except IndexError: return None - def items(self): - # type: () -> Iterable[Tuple[str, Any]] + def items(self) -> Iterable[Tuple[str, Any]]: """Returns key-value pairs like dict.items() representing the loaded configuration """ return self._dictionary.items() - def get_value(self, key): - # type: (str) -> Any - """Get a value from the configuration. - """ + def get_value(self, key: str) -> Any: + """Get a value from the configuration.""" + orig_key = key + key = _normalize_name(key) try: return self._dictionary[key] except KeyError: - raise ConfigurationError(f"No such key - {key}") - - def set_value(self, key, value): - # type: (str, Any) -> None - """Modify a value in the configuration. - """ + # disassembling triggers a more useful error message than simply + # "No such key" in the case that the key isn't in the form command.option + _disassemble_key(key) + raise ConfigurationError(f"No such key - {orig_key}") + + def set_value(self, key: str, value: Any) -> None: + """Modify a value in the configuration.""" + key = _normalize_name(key) self._ensure_have_load_only() assert self.load_only @@ -183,21 +171,23 @@ class Configuration: self._config[self.load_only][key] = value self._mark_as_modified(fname, parser) - def unset_value(self, key): - # type: (str) -> None + def unset_value(self, key: str) -> None: """Unset a value in the configuration.""" + orig_key = key + key = _normalize_name(key) self._ensure_have_load_only() assert self.load_only if key not in self._config[self.load_only]: - raise ConfigurationError(f"No such key - {key}") + raise ConfigurationError(f"No such key - {orig_key}") fname, parser = self._get_parser_to_modify() if parser is not None: section, name = _disassemble_key(key) - if not (parser.has_section(section) - and parser.remove_option(section, name)): + if not ( + parser.has_section(section) and parser.remove_option(section, name) + ): # The option was not removed. 
raise ConfigurationError( "Fatal Internal error [id=1]. Please report as a bug." @@ -210,10 +200,8 @@ class Configuration: del self._config[self.load_only][key] - def save(self): - # type: () -> None - """Save the current in-memory state. - """ + def save(self) -> None: + """Save the current in-memory state.""" self._ensure_have_load_only() for fname, parser in self._modified_parsers: @@ -222,24 +210,28 @@ class Configuration: # Ensure directory exists. ensure_dir(os.path.dirname(fname)) - with open(fname, "w") as f: - parser.write(f) + # Ensure directory's permission(need to be writeable) + try: + with open(fname, "w") as f: + parser.write(f) + except OSError as error: + raise ConfigurationError( + f"An error occurred while writing to the configuration file " + f"{fname}: {error}" + ) # # Private routines # - def _ensure_have_load_only(self): - # type: () -> None + def _ensure_have_load_only(self) -> None: if self.load_only is None: raise ConfigurationError("Needed a specific file to be modifying.") logger.debug("Will be working with %s variant only", self.load_only) @property - def _dictionary(self): - # type: () -> Dict[str, Any] - """A dictionary representing the loaded configuration. - """ + def _dictionary(self) -> Dict[str, Any]: + """A dictionary representing the loaded configuration.""" # NOTE: Dictionaries are not populated if not loaded. So, conditionals # are not needed here. retval = {} @@ -249,10 +241,8 @@ class Configuration: return retval - def _load_config_files(self): - # type: () -> None - """Loads configuration from configuration files - """ + def _load_config_files(self) -> None: + """Loads configuration from configuration files""" config_files = dict(self.iter_config_files()) if config_files[kinds.ENV][0:1] == [os.devnull]: logger.debug( @@ -266,9 +256,7 @@ class Configuration: # If there's specific variant set in `load_only`, load only # that variant, not the others. if self.load_only is not None and variant != self.load_only: - logger.debug( - "Skipping file '%s' (variant: %s)", fname, variant - ) + logger.debug("Skipping file '%s' (variant: %s)", fname, variant) continue parser = self._load_file(variant, fname) @@ -276,9 +264,8 @@ class Configuration: # Keeping track of the parsers used self._parsers[variant].append((fname, parser)) - def _load_file(self, variant, fname): - # type: (Kind, str) -> RawConfigParser - logger.debug("For variant '%s', will try loading '%s'", variant, fname) + def _load_file(self, variant: Kind, fname: str) -> RawConfigParser: + logger.verbose("For variant '%s', will try loading '%s'", variant, fname) parser = self._construct_parser(fname) for section in parser.sections(): @@ -287,22 +274,20 @@ class Configuration: return parser - def _construct_parser(self, fname): - # type: (str) -> RawConfigParser + def _construct_parser(self, fname: str) -> RawConfigParser: parser = configparser.RawConfigParser() # If there is no such file, don't bother reading it but create the # parser anyway, to hold the data. # Doing this is useful when modifying and saving files, where we don't # need to construct a parser. 
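For context on the _dictionary property above: OVERRIDE_ORDER is GLOBAL, USER, SITE, ENV, ENV_VAR, and flattening simply updates one dict in that order, so later variants win. A toy illustration with plain dicts (the values are made up):

OVERRIDE_ORDER = ("global", "user", "site", "env", "env-var")
loaded = {
    "global": {"global.index-url": "https://mirror-a.example/simple"},
    "user": {"global.index-url": "https://mirror-b.example/simple"},
    "env-var": {"global.timeout": "60"},
}
effective = {}
for variant in OVERRIDE_ORDER:
    # Later variants overwrite earlier ones, exactly like _dictionary.
    effective.update(loaded.get(variant, {}))
print(effective)  # the user mirror overrides global; env-var adds timeout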
if os.path.exists(fname): + locale_encoding = locale.getpreferredencoding(False) try: - parser.read(fname) + parser.read(fname, encoding=locale_encoding) except UnicodeDecodeError: # See https://github.com/pypa/pip/issues/4963 raise ConfigurationFileCouldNotBeLoaded( - reason="contains invalid {} characters".format( - locale.getpreferredencoding(False) - ), + reason=f"contains invalid {locale_encoding} characters", fname=fname, ) except configparser.Error as error: @@ -310,16 +295,15 @@ class Configuration: raise ConfigurationFileCouldNotBeLoaded(error=error) return parser - def _load_environment_vars(self): - # type: () -> None - """Loads configuration from environment variables - """ + def _load_environment_vars(self) -> None: + """Loads configuration from environment variables""" self._config[kinds.ENV_VAR].update( self._normalized_keys(":env:", self.get_environ_vars()) ) - def _normalized_keys(self, section, items): - # type: (str, Iterable[Tuple[str, Any]]) -> Dict[str, Any] + def _normalized_keys( + self, section: str, items: Iterable[Tuple[str, Any]] + ) -> Dict[str, Any]: """Normalizes items to construct a dictionary with normalized keys. This routine is where the names become keys and are made the same @@ -331,8 +315,7 @@ class Configuration: normalized[key] = val return normalized - def get_environ_vars(self): - # type: () -> Iterable[Tuple[str, str]] + def get_environ_vars(self) -> Iterable[Tuple[str, str]]: """Returns a generator with all environmental vars with prefix PIP_""" for key, val in os.environ.items(): if key.startswith("PIP_"): @@ -341,8 +324,7 @@ class Configuration: yield name, val # XXX: This is patched in the tests. - def iter_config_files(self): - # type: () -> Iterable[Tuple[Kind, List[str]]] + def iter_config_files(self) -> Iterable[Tuple[Kind, List[str]]]: """Yields variant and configuration files associated with it. This should be treated like items of a dictionary. @@ -350,7 +332,7 @@ class Configuration: # SMELL: Move the conditions out of this function # environment variables have the lowest priority - config_file = os.environ.get('PIP_CONFIG_FILE', None) + config_file = os.environ.get("PIP_CONFIG_FILE", None) if config_file is not None: yield kinds.ENV, [config_file] else: @@ -372,13 +354,11 @@ class Configuration: # finally virtualenv configuration first trumping others yield kinds.SITE, config_files[kinds.SITE] - def get_values_in_config(self, variant): - # type: (Kind) -> Dict[str, Any] + def get_values_in_config(self, variant: Kind) -> Dict[str, Any]: """Get values present in a config file""" return self._config[variant] - def _get_parser_to_modify(self): - # type: () -> Tuple[str, RawConfigParser] + def _get_parser_to_modify(self) -> Tuple[str, RawConfigParser]: # Determine which parser to modify assert self.load_only parsers = self._parsers[self.load_only] @@ -392,12 +372,10 @@ class Configuration: return parsers[-1] # XXX: This is patched in the tests. 
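get_environ_vars and _normalized_keys above turn PIP_INDEX_URL into an ":env:"-sectioned "index-url" entry, skipping PIP_VERSION and PIP_HELP (ENV_NAMES_IGNORED). A self-contained restatement of that mapping:

import os

def env_config_items(environ=None):
    environ = os.environ if environ is None else environ
    for key, val in environ.items():
        if not key.startswith("PIP_"):
            continue
        name = key[4:].lower().replace("_", "-")
        if name in ("version", "help"):  # ENV_NAMES_IGNORED
            continue
        yield name, val

print(dict(env_config_items({"PIP_INDEX_URL": "https://example.invalid/simple"})))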
- def _mark_as_modified(self, fname, parser): - # type: (str, RawConfigParser) -> None + def _mark_as_modified(self, fname: str, parser: RawConfigParser) -> None: file_parser_tuple = (fname, parser) if file_parser_tuple not in self._modified_parsers: self._modified_parsers.append(file_parser_tuple) - def __repr__(self): - # type: () -> str + def __repr__(self) -> str: return f"{self.__class__.__name__}({self._dictionary!r})" diff --git a/env/Lib/site-packages/pip/_internal/distributions/__init__.py b/env/Lib/site-packages/pip/_internal/distributions/__init__.py index a222f248f341aa41a446a45839783d3be55104fb..9a89a838b9a5cb264e9ae9d269fbedca6e2d6333 100644 --- a/env/Lib/site-packages/pip/_internal/distributions/__init__.py +++ b/env/Lib/site-packages/pip/_internal/distributions/__init__.py @@ -4,8 +4,9 @@ from pip._internal.distributions.wheel import WheelDistribution from pip._internal.req.req_install import InstallRequirement -def make_distribution_for_install_requirement(install_req): - # type: (InstallRequirement) -> AbstractDistribution +def make_distribution_for_install_requirement( + install_req: InstallRequirement, +) -> AbstractDistribution: """Returns a Distribution for the given InstallRequirement""" # Editable requirements will always be source distributions. They use the # legacy logic until we create a modern standard for them. diff --git a/env/Lib/site-packages/pip/_internal/distributions/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/distributions/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 22cfea607a2612213cf9fd6af0869fe66f430c81..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/distributions/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/distributions/__pycache__/base.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/distributions/__pycache__/base.cpython-39.pyc deleted file mode 100644 index ca1348f8b68113bf2f0bcdd08af1f0b2a626cbfe..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/distributions/__pycache__/base.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/distributions/__pycache__/installed.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/distributions/__pycache__/installed.cpython-39.pyc deleted file mode 100644 index bf4c62f802ccbd67d548592d650a465dacbe934c..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/distributions/__pycache__/installed.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/distributions/__pycache__/sdist.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/distributions/__pycache__/sdist.cpython-39.pyc deleted file mode 100644 index 4444383634cc4cff58b6e2d53db64ccce1a260e7..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/distributions/__pycache__/sdist.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/distributions/__pycache__/wheel.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/distributions/__pycache__/wheel.cpython-39.pyc deleted file mode 100644 index 04dee280ffb115c30d1eebd36b931e88e6cc71bd..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/distributions/__pycache__/wheel.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/distributions/base.py 
b/env/Lib/site-packages/pip/_internal/distributions/base.py index 78ee91e76f11067138a41c173c4ce77dd6c7c790..75ce2dc9057a20a957abe2fbd4ef094dc4196684 100644 --- a/env/Lib/site-packages/pip/_internal/distributions/base.py +++ b/env/Lib/site-packages/pip/_internal/distributions/base.py @@ -1,9 +1,7 @@ import abc -from typing import Optional - -from pip._vendor.pkg_resources import Distribution from pip._internal.index.package_finder import PackageFinder +from pip._internal.metadata.base import BaseDistribution from pip._internal.req import InstallRequirement @@ -23,17 +21,19 @@ class AbstractDistribution(metaclass=abc.ABCMeta): above metadata. """ - def __init__(self, req): - # type: (InstallRequirement) -> None + def __init__(self, req: InstallRequirement) -> None: super().__init__() self.req = req @abc.abstractmethod - def get_pkg_resources_distribution(self): - # type: () -> Optional[Distribution] + def get_metadata_distribution(self) -> BaseDistribution: raise NotImplementedError() @abc.abstractmethod - def prepare_distribution_metadata(self, finder, build_isolation): - # type: (PackageFinder, bool) -> None + def prepare_distribution_metadata( + self, + finder: PackageFinder, + build_isolation: bool, + check_build_deps: bool, + ) -> None: raise NotImplementedError() diff --git a/env/Lib/site-packages/pip/_internal/distributions/installed.py b/env/Lib/site-packages/pip/_internal/distributions/installed.py index b19dfacb4dbe342b440af3b6721f653d69d19ad5..edb38aa1a6c54dcb73e2f74b6bdfff337841d99f 100644 --- a/env/Lib/site-packages/pip/_internal/distributions/installed.py +++ b/env/Lib/site-packages/pip/_internal/distributions/installed.py @@ -1,9 +1,6 @@ -from typing import Optional - -from pip._vendor.pkg_resources import Distribution - from pip._internal.distributions.base import AbstractDistribution from pip._internal.index.package_finder import PackageFinder +from pip._internal.metadata import BaseDistribution class InstalledDistribution(AbstractDistribution): @@ -13,10 +10,14 @@ class InstalledDistribution(AbstractDistribution): been computed. 
""" - def get_pkg_resources_distribution(self): - # type: () -> Optional[Distribution] + def get_metadata_distribution(self) -> BaseDistribution: + assert self.req.satisfied_by is not None, "not actually installed" return self.req.satisfied_by - def prepare_distribution_metadata(self, finder, build_isolation): - # type: (PackageFinder, bool) -> None + def prepare_distribution_metadata( + self, + finder: PackageFinder, + build_isolation: bool, + check_build_deps: bool, + ) -> None: pass diff --git a/env/Lib/site-packages/pip/_internal/distributions/sdist.py b/env/Lib/site-packages/pip/_internal/distributions/sdist.py index c873a9f10e112987d2feafe7ad39633e23662d3c..4c25647930c6557d10e8a3ee92b68cfe3a07f7d7 100644 --- a/env/Lib/site-packages/pip/_internal/distributions/sdist.py +++ b/env/Lib/site-packages/pip/_internal/distributions/sdist.py @@ -1,12 +1,11 @@ import logging -from typing import Set, Tuple - -from pip._vendor.pkg_resources import Distribution +from typing import Iterable, Set, Tuple from pip._internal.build_env import BuildEnvironment from pip._internal.distributions.base import AbstractDistribution from pip._internal.exceptions import InstallationError from pip._internal.index.package_finder import PackageFinder +from pip._internal.metadata import BaseDistribution from pip._internal.utils.subprocess import runner_with_spinner_message logger = logging.getLogger(__name__) @@ -19,40 +18,49 @@ class SourceDistribution(AbstractDistribution): generated, either using PEP 517 or using the legacy `setup.py egg_info`. """ - def get_pkg_resources_distribution(self): - # type: () -> Distribution + def get_metadata_distribution(self) -> BaseDistribution: return self.req.get_dist() - def prepare_distribution_metadata(self, finder, build_isolation): - # type: (PackageFinder, bool) -> None + def prepare_distribution_metadata( + self, + finder: PackageFinder, + build_isolation: bool, + check_build_deps: bool, + ) -> None: # Load pyproject.toml, to determine whether PEP 517 is to be used self.req.load_pyproject_toml() # Set up the build isolation, if this requirement should be isolated should_isolate = self.req.use_pep517 and build_isolation if should_isolate: - self._setup_isolation(finder) - - self.req.prepare_metadata() - - def _setup_isolation(self, finder): - # type: (PackageFinder) -> None - def _raise_conflicts(conflicting_with, conflicting_reqs): - # type: (str, Set[Tuple[str, str]]) -> None - format_string = ( - "Some build dependencies for {requirement} " - "conflict with {conflicting_with}: {description}." + # Setup an isolated environment and install the build backend static + # requirements in it. + self._prepare_build_backend(finder) + # Check that if the requirement is editable, it either supports PEP 660 or + # has a setup.py or a setup.cfg. This cannot be done earlier because we need + # to setup the build backend to verify it supports build_editable, nor can + # it be done later, because we want to avoid installing build requirements + # needlessly. Doing it here also works around setuptools generating + # UNKNOWN.egg-info when running get_requires_for_build_wheel on a directory + # without setup.py nor setup.cfg. + self.req.isolated_editable_sanity_check() + # Install the dynamic build requirements. 
+ self._install_build_reqs(finder) + # Check if the current environment provides build dependencies + should_check_deps = self.req.use_pep517 and check_build_deps + if should_check_deps: + pyproject_requires = self.req.pyproject_requires + assert pyproject_requires is not None + conflicting, missing = self.req.build_env.check_requirements( + pyproject_requires ) - error_message = format_string.format( - requirement=self.req, - conflicting_with=conflicting_with, - description=", ".join( - f"{installed} is incompatible with {wanted}" - for installed, wanted in sorted(conflicting) - ), - ) - raise InstallationError(error_message) + if conflicting: + self._raise_conflicts("the backend dependencies", conflicting) + if missing: + self._raise_missing_reqs(missing) + self.req.prepare_metadata() + def _prepare_build_backend(self, finder: PackageFinder) -> None: # Isolate in a BuildEnvironment and install the build-time # requirements. pyproject_requires = self.req.pyproject_requires @@ -60,13 +68,13 @@ class SourceDistribution(AbstractDistribution): self.req.build_env = BuildEnvironment() self.req.build_env.install_requirements( - finder, pyproject_requires, "overlay", "Installing build dependencies" + finder, pyproject_requires, "overlay", kind="build dependencies" ) conflicting, missing = self.req.build_env.check_requirements( self.req.requirements_to_check ) if conflicting: - _raise_conflicts("PEP 517/518 supported requirements", conflicting) + self._raise_conflicts("PEP 517/518 supported requirements", conflicting) if missing: logger.warning( "Missing build requirements in pyproject.toml for %s.", @@ -77,19 +85,66 @@ class SourceDistribution(AbstractDistribution): "pip cannot fall back to setuptools without %s.", " and ".join(map(repr, sorted(missing))), ) - # Install any extra build dependencies that the backend requests. - # This must be done in a second pass, as the pyproject.toml - # dependencies must be installed before we can call the backend. + + def _get_build_requires_wheel(self) -> Iterable[str]: with self.req.build_env: runner = runner_with_spinner_message("Getting requirements to build wheel") backend = self.req.pep517_backend assert backend is not None with backend.subprocess_runner(runner): - reqs = backend.get_requires_for_build_wheel() + return backend.get_requires_for_build_wheel() - conflicting, missing = self.req.build_env.check_requirements(reqs) + def _get_build_requires_editable(self) -> Iterable[str]: + with self.req.build_env: + runner = runner_with_spinner_message( + "Getting requirements to build editable" + ) + backend = self.req.pep517_backend + assert backend is not None + with backend.subprocess_runner(runner): + return backend.get_requires_for_build_editable() + + def _install_build_reqs(self, finder: PackageFinder) -> None: + # Install any extra build dependencies that the backend requests. + # This must be done in a second pass, as the pyproject.toml + # dependencies must be installed before we can call the backend. 
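The _install_build_reqs refactor here asks the backend for its dynamic requirements via get_requires_for_build_wheel or, for PEP 660 editables, get_requires_for_build_editable. pip runs these hooks in a subprocess; the standalone pyproject_hooks package exposes the same calls. A sketch, assuming `pip install pyproject_hooks` and that the current directory is a project using setuptools.build_meta as its backend:

from pyproject_hooks import BuildBackendHookCaller

hooks = BuildBackendHookCaller(".", build_backend="setuptools.build_meta")
# Runs the hook in a fresh interpreter, much like pip's spinner-wrapped runner.
print(hooks.get_requires_for_build_wheel())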
+ if ( + self.req.editable + and self.req.permit_editable_wheels + and self.req.supports_pyproject_editable() + ): + build_reqs = self._get_build_requires_editable() + else: + build_reqs = self._get_build_requires_wheel() + conflicting, missing = self.req.build_env.check_requirements(build_reqs) if conflicting: - _raise_conflicts("the backend dependencies", conflicting) + self._raise_conflicts("the backend dependencies", conflicting) self.req.build_env.install_requirements( - finder, missing, "normal", "Installing backend dependencies" + finder, missing, "normal", kind="backend dependencies" + ) + + def _raise_conflicts( + self, conflicting_with: str, conflicting_reqs: Set[Tuple[str, str]] + ) -> None: + format_string = ( + "Some build dependencies for {requirement} " + "conflict with {conflicting_with}: {description}." + ) + error_message = format_string.format( + requirement=self.req, + conflicting_with=conflicting_with, + description=", ".join( + f"{installed} is incompatible with {wanted}" + for installed, wanted in sorted(conflicting_reqs) + ), + ) + raise InstallationError(error_message) + + def _raise_missing_reqs(self, missing: Set[str]) -> None: + format_string = ( + "Some build dependencies for {requirement} are missing: {missing}." + ) + error_message = format_string.format( + requirement=self.req, missing=", ".join(map(repr, sorted(missing))) ) + raise InstallationError(error_message) diff --git a/env/Lib/site-packages/pip/_internal/distributions/wheel.py b/env/Lib/site-packages/pip/_internal/distributions/wheel.py index d0384797b46d2fc8a67d54ac92919d8c7de277e1..03aac775b53f2dd3153a9f44829e7987258950aa 100644 --- a/env/Lib/site-packages/pip/_internal/distributions/wheel.py +++ b/env/Lib/site-packages/pip/_internal/distributions/wheel.py @@ -1,10 +1,12 @@ -from zipfile import ZipFile - -from pip._vendor.pkg_resources import Distribution +from pip._vendor.packaging.utils import canonicalize_name from pip._internal.distributions.base import AbstractDistribution from pip._internal.index.package_finder import PackageFinder -from pip._internal.utils.wheel import pkg_resources_distribution_for_wheel +from pip._internal.metadata import ( + BaseDistribution, + FilesystemWheel, + get_wheel_distribution, +) class WheelDistribution(AbstractDistribution): @@ -13,22 +15,20 @@ class WheelDistribution(AbstractDistribution): This does not need any preparation as wheels can be directly unpacked. """ - def get_pkg_resources_distribution(self): - # type: () -> Distribution + def get_metadata_distribution(self) -> BaseDistribution: """Loads the metadata from the wheel file into memory and returns a Distribution that uses it, not relying on the wheel file or requirement. """ - # Set as part of preparation during download. - assert self.req.local_file_path - # Wheels are never unnamed. 
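In the WheelDistribution hunk being rewritten below, metadata is loaded straight from the archive (FilesystemWheel plus get_wheel_distribution) instead of going through pkg_resources. The underlying trick is just reading *.dist-info/METADATA out of the zip; a stdlib sketch (the wheel filename is illustrative):

import zipfile
from email.parser import Parser

def wheel_metadata(path: str):
    with zipfile.ZipFile(path) as zf:
        name = next(n for n in zf.namelist() if n.endswith(".dist-info/METADATA"))
        # METADATA uses RFC 822-style headers, hence the email parser.
        return Parser().parsestr(zf.read(name).decode("utf-8"))

meta = wheel_metadata("example-1.0-py3-none-any.whl")
print(meta["Name"], meta["Version"])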
- assert self.req.name - - with ZipFile(self.req.local_file_path, allowZip64=True) as z: - return pkg_resources_distribution_for_wheel( - z, self.req.name, self.req.local_file_path - ) - - def prepare_distribution_metadata(self, finder, build_isolation): - # type: (PackageFinder, bool) -> None + assert self.req.local_file_path, "Set as part of preparation during download" + assert self.req.name, "Wheels are never unnamed" + wheel = FilesystemWheel(self.req.local_file_path) + return get_wheel_distribution(wheel, canonicalize_name(self.req.name)) + + def prepare_distribution_metadata( + self, + finder: PackageFinder, + build_isolation: bool, + check_build_deps: bool, + ) -> None: pass diff --git a/env/Lib/site-packages/pip/_internal/exceptions.py b/env/Lib/site-packages/pip/_internal/exceptions.py index 8aacf8120147c457dccd4b955fd898c3f5a90ba9..d95fe44b34a936dc178c89d98ee9ef093cb0fccb 100644 --- a/env/Lib/site-packages/pip/_internal/exceptions.py +++ b/env/Lib/site-packages/pip/_internal/exceptions.py @@ -1,22 +1,181 @@ -"""Exceptions used throughout package""" +"""Exceptions used throughout package. + +This module MUST NOT try to import from anything within `pip._internal` to +operate. This is expected to be importable from any/all files within the +subpackage and, thus, should not depend on them. +""" import configparser +import contextlib +import locale +import logging +import pathlib +import re +import sys from itertools import chain, groupby, repeat -from typing import TYPE_CHECKING, Dict, List, Optional +from typing import TYPE_CHECKING, Dict, Iterator, List, Optional, Union -from pip._vendor.pkg_resources import Distribution from pip._vendor.requests.models import Request, Response +from pip._vendor.rich.console import Console, ConsoleOptions, RenderResult +from pip._vendor.rich.markup import escape +from pip._vendor.rich.text import Text if TYPE_CHECKING: from hashlib import _Hash + from typing import Literal + from pip._internal.metadata import BaseDistribution from pip._internal.req.req_install import InstallRequirement +logger = logging.getLogger(__name__) + + +# +# Scaffolding +# +def _is_kebab_case(s: str) -> bool: + return re.match(r"^[a-z]+(-[a-z]+)*$", s) is not None + + +def _prefix_with_indent( + s: Union[Text, str], + console: Console, + *, + prefix: str, + indent: str, +) -> Text: + if isinstance(s, Text): + text = s + else: + text = console.render_str(s) + + return console.render_str(prefix, overflow="ignore") + console.render_str( + f"\n{indent}", overflow="ignore" + ).join(text.split(allow_blank=True)) + class PipError(Exception): - """Base pip exception""" + """The base pip error.""" + + +class DiagnosticPipError(PipError): + """An error, that presents diagnostic information to the user. + + This contains a bunch of logic, to enable pretty presentation of our error + messages. Each error gets a unique reference. Each error can also include + additional context, a hint and/or a note -- which are presented with the + main error message in a consistent style. + + This is adapted from the error output styling in `sphinx-theme-builder`. + """ + + reference: str + + def __init__( + self, + *, + kind: 'Literal["error", "warning"]' = "error", + reference: Optional[str] = None, + message: Union[str, Text], + context: Optional[Union[str, Text]], + hint_stmt: Optional[Union[str, Text]], + note_stmt: Optional[Union[str, Text]] = None, + link: Optional[str] = None, + ) -> None: + # Ensure a proper reference is provided. 
+ if reference is None: + assert hasattr(self, "reference"), "error reference not provided!" + reference = self.reference + assert _is_kebab_case(reference), "error reference must be kebab-case!" + + self.kind = kind + self.reference = reference + + self.message = message + self.context = context + + self.note_stmt = note_stmt + self.hint_stmt = hint_stmt + + self.link = link + + super().__init__(f"<{self.__class__.__name__}: {self.reference}>") + + def __repr__(self) -> str: + return ( + f"<{self.__class__.__name__}(" + f"reference={self.reference!r}, " + f"message={self.message!r}, " + f"context={self.context!r}, " + f"note_stmt={self.note_stmt!r}, " + f"hint_stmt={self.hint_stmt!r}" + ")>" + ) + + def __rich_console__( + self, + console: Console, + options: ConsoleOptions, + ) -> RenderResult: + colour = "red" if self.kind == "error" else "yellow" + + yield f"[{colour} bold]{self.kind}[/]: [bold]{self.reference}[/]" + yield "" + + if not options.ascii_only: + # Present the main message, with relevant context indented. + if self.context is not None: + yield _prefix_with_indent( + self.message, + console, + prefix=f"[{colour}]×[/] ", + indent=f"[{colour}]│[/] ", + ) + yield _prefix_with_indent( + self.context, + console, + prefix=f"[{colour}]╰─>[/] ", + indent=f"[{colour}] [/] ", + ) + else: + yield _prefix_with_indent( + self.message, + console, + prefix="[red]×[/] ", + indent=" ", + ) + else: + yield self.message + if self.context is not None: + yield "" + yield self.context + + if self.note_stmt is not None or self.hint_stmt is not None: + yield "" + + if self.note_stmt is not None: + yield _prefix_with_indent( + self.note_stmt, + console, + prefix="[magenta bold]note[/]: ", + indent=" ", + ) + if self.hint_stmt is not None: + yield _prefix_with_indent( + self.hint_stmt, + console, + prefix="[cyan bold]hint[/]: ", + indent=" ", + ) + + if self.link is not None: + yield "" + yield f"Link: {self.link}" +# +# Actual Errors +# class ConfigurationError(PipError): """General exception in configuration""" @@ -29,17 +188,54 @@ class UninstallationError(PipError): """General exception during uninstallation""" +class MissingPyProjectBuildRequires(DiagnosticPipError): + """Raised when pyproject.toml has `build-system`, but no `build-system.requires`.""" + + reference = "missing-pyproject-build-system-requires" + + def __init__(self, *, package: str) -> None: + super().__init__( + message=f"Can not process {escape(package)}", + context=Text( + "This package has an invalid pyproject.toml file.\n" + "The [build-system] table is missing the mandatory `requires` key." 
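DiagnosticPipError.__rich_console__ above composes "error: <reference>" plus box-drawn, indented context, note, and hint lines. A tiny stand-alone imitation with the public rich API (pip vendors its own copy of rich; this assumes `pip install rich`, and the message text is made up):

from rich.console import Console

console = Console()
console.print("[red bold]error[/]: [bold]subprocess-exited-with-error[/]")
console.print()
console.print("[red]×[/] python setup.py egg_info did not run successfully.")
console.print("[red]╰─>[/] exit code: 1")
console.print()
console.print("[magenta bold]note[/]: This error originates from a subprocess.")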
+ ), + note_stmt="This is an issue with the package mentioned above, not pip.", + hint_stmt=Text("See PEP 518 for the detailed specification."), + ) + + +class InvalidPyProjectBuildRequires(DiagnosticPipError): + """Raised when pyproject.toml an invalid `build-system.requires`.""" + + reference = "invalid-pyproject-build-system-requires" + + def __init__(self, *, package: str, reason: str) -> None: + super().__init__( + message=f"Can not process {escape(package)}", + context=Text( + "This package has an invalid `build-system.requires` key in " + f"pyproject.toml.\n{reason}" + ), + note_stmt="This is an issue with the package mentioned above, not pip.", + hint_stmt=Text("See PEP 518 for the detailed specification."), + ) + + class NoneMetadataError(PipError): - """ - Raised when accessing "METADATA" or "PKG-INFO" metadata for a - pip._vendor.pkg_resources.Distribution object and - `dist.has_metadata('METADATA')` returns True but - `dist.get_metadata('METADATA')` returns None (and similarly for - "PKG-INFO"). + """Raised when accessing a Distribution's "METADATA" or "PKG-INFO". + + This signifies an inconsistency, when the Distribution claims to have + the metadata file (if not, raise ``FileNotFoundError`` instead), but is + not actually able to produce its content. This may be due to permission + errors. """ - def __init__(self, dist, metadata_name): - # type: (Distribution, str) -> None + def __init__( + self, + dist: "BaseDistribution", + metadata_name: str, + ) -> None: """ :param dist: A Distribution object. :param metadata_name: The name of the metadata being accessed @@ -48,28 +244,24 @@ class NoneMetadataError(PipError): self.dist = dist self.metadata_name = metadata_name - def __str__(self): - # type: () -> str + def __str__(self) -> str: # Use `dist` in the error message because its stringification # includes more information, like the version and location. - return ( - 'None {} metadata found for distribution: {}'.format( - self.metadata_name, self.dist, - ) + return "None {} metadata found for distribution: {}".format( + self.metadata_name, + self.dist, ) class UserInstallationInvalid(InstallationError): """A --user install is requested on an environment without user site.""" - def __str__(self): - # type: () -> str + def __str__(self) -> str: return "User base directory is not specified" class InvalidSchemeCombination(InstallationError): - def __str__(self): - # type: () -> str + def __str__(self) -> str: before = ", ".join(str(a) for a in self.args[:-1]) return f"Cannot set {before} and {self.args[-1]} together" @@ -102,8 +294,12 @@ class PreviousBuildDirError(PipError): class NetworkConnectionError(PipError): """HTTP connection error""" - def __init__(self, error_msg, response=None, request=None): - # type: (str, Response, Request) -> None + def __init__( + self, + error_msg: str, + response: Optional[Response] = None, + request: Optional[Request] = None, + ) -> None: """ Initialize NetworkConnectionError with `request` and `response` objects. 
@@ -111,13 +307,15 @@ class NetworkConnectionError(PipError): self.response = response self.request = request self.error_msg = error_msg - if (self.response is not None and not self.request and - hasattr(response, 'request')): + if ( + self.response is not None + and not self.request + and hasattr(response, "request") + ): self.request = self.response.request super().__init__(error_msg, response, request) - def __str__(self): - # type: () -> str + def __str__(self) -> str: return str(self.error_msg) @@ -129,74 +327,122 @@ class UnsupportedWheel(InstallationError): """Unsupported wheel.""" +class InvalidWheel(InstallationError): + """Invalid (e.g. corrupt) wheel.""" + + def __init__(self, location: str, name: str): + self.location = location + self.name = name + + def __str__(self) -> str: + return f"Wheel '{self.name}' located at {self.location} is invalid." + + class MetadataInconsistent(InstallationError): """Built metadata contains inconsistent information. This is raised when the metadata contains values (e.g. name and version) - that do not match the information previously obtained from sdist filename - or user-supplied ``#egg=`` value. + that do not match the information previously obtained from sdist filename, + user-supplied ``#egg=`` value, or an install requirement name. """ - def __init__(self, ireq, field, f_val, m_val): - # type: (InstallRequirement, str, str, str) -> None + + def __init__( + self, ireq: "InstallRequirement", field: str, f_val: str, m_val: str + ) -> None: self.ireq = ireq self.field = field self.f_val = f_val self.m_val = m_val - def __str__(self): - # type: () -> str - template = ( - "Requested {} has inconsistent {}: " - "filename has {!r}, but metadata has {!r}" + def __str__(self) -> str: + return ( + f"Requested {self.ireq} has inconsistent {self.field}: " + f"expected {self.f_val!r}, but metadata has {self.m_val!r}" ) - return template.format(self.ireq, self.field, self.f_val, self.m_val) -class InstallationSubprocessError(InstallationError): - """A subprocess call failed during installation.""" - def __init__(self, returncode, description): - # type: (int, str) -> None - self.returncode = returncode - self.description = description +class InstallationSubprocessError(DiagnosticPipError, InstallationError): + """A subprocess call failed.""" - def __str__(self): - # type: () -> str - return ( - "Command errored out with exit status {}: {} " - "Check the logs for full command output." - ).format(self.returncode, self.description) + reference = "subprocess-exited-with-error" + + def __init__( + self, + *, + command_description: str, + exit_code: int, + output_lines: Optional[List[str]], + ) -> None: + if output_lines is None: + output_prompt = Text("See above for output.") + else: + output_prompt = ( + Text.from_markup(f"[red][{len(output_lines)} lines of output][/]\n") + + Text("".join(output_lines)) + + Text.from_markup(R"[red]\[end of output][/]") + ) + + super().__init__( + message=( + f"[green]{escape(command_description)}[/] did not run successfully.\n" + f"exit code: {exit_code}" + ), + context=output_prompt, + hint_stmt=None, + note_stmt=( + "This error originates from a subprocess, and is likely not a " + "problem with pip." 
+ ), + ) + + self.command_description = command_description + self.exit_code = exit_code + + def __str__(self) -> str: + return f"{self.command_description} exited with {self.exit_code}" + + +class MetadataGenerationFailed(InstallationSubprocessError, InstallationError): + reference = "metadata-generation-failed" + + def __init__( + self, + *, + package_details: str, + ) -> None: + super(InstallationSubprocessError, self).__init__( + message="Encountered error while generating package metadata.", + context=escape(package_details), + hint_stmt="See above for details.", + note_stmt="This is an issue with the package mentioned above, not pip.", + ) + + def __str__(self) -> str: + return "metadata generation failed" class HashErrors(InstallationError): """Multiple HashError instances rolled into one for reporting""" - def __init__(self): - # type: () -> None - self.errors = [] # type: List[HashError] + def __init__(self) -> None: + self.errors: List["HashError"] = [] - def append(self, error): - # type: (HashError) -> None + def append(self, error: "HashError") -> None: self.errors.append(error) - def __str__(self): - # type: () -> str + def __str__(self) -> str: lines = [] self.errors.sort(key=lambda e: e.order) for cls, errors_of_cls in groupby(self.errors, lambda e: e.__class__): lines.append(cls.head) lines.extend(e.body() for e in errors_of_cls) if lines: - return '\n'.join(lines) - return '' + return "\n".join(lines) + return "" - def __nonzero__(self): - # type: () -> bool + def __bool__(self) -> bool: return bool(self.errors) - def __bool__(self): - # type: () -> bool - return self.__nonzero__() - class HashError(InstallationError): """ @@ -214,12 +460,12 @@ class HashError(InstallationError): typically available earlier. """ - req = None # type: Optional[InstallRequirement] - head = '' - order = -1 # type: int - def body(self): - # type: () -> str + req: Optional["InstallRequirement"] = None + head = "" + order: int = -1 + + def body(self) -> str: """Return a summary of me for display under the heading. This default implementation simply prints a description of the @@ -229,21 +475,19 @@ class HashError(InstallationError): its link already populated by the resolver's _populate_link(). """ - return f' {self._requirement_name()}' + return f" {self._requirement_name()}" - def __str__(self): - # type: () -> str - return f'{self.head}\n{self.body()}' + def __str__(self) -> str: + return f"{self.head}\n{self.body()}" - def _requirement_name(self): - # type: () -> str + def _requirement_name(self) -> str: """Return a description of the requirement that triggered me. 
This default implementation returns long description of the req, with line numbers """ - return str(self.req) if self.req else 'unknown package' + return str(self.req) if self.req else "unknown package" class VcsHashUnsupported(HashError): @@ -251,8 +495,10 @@ class VcsHashUnsupported(HashError): we don't have a method for hashing those.""" order = 0 - head = ("Can't verify hashes for these requirements because we don't " - "have a way to hash version control repositories:") + head = ( + "Can't verify hashes for these requirements because we don't " + "have a way to hash version control repositories:" + ) class DirectoryUrlHashUnsupported(HashError): @@ -260,32 +506,34 @@ class DirectoryUrlHashUnsupported(HashError): we don't have a method for hashing those.""" order = 1 - head = ("Can't verify hashes for these file:// requirements because they " - "point to directories:") + head = ( + "Can't verify hashes for these file:// requirements because they " + "point to directories:" + ) class HashMissing(HashError): """A hash was needed for a requirement but is absent.""" order = 2 - head = ('Hashes are required in --require-hashes mode, but they are ' - 'missing from some requirements. Here is a list of those ' - 'requirements along with the hashes their downloaded archives ' - 'actually had. Add lines like these to your requirements files to ' - 'prevent tampering. (If you did not enable --require-hashes ' - 'manually, note that it turns on automatically when any package ' - 'has a hash.)') - - def __init__(self, gotten_hash): - # type: (str) -> None + head = ( + "Hashes are required in --require-hashes mode, but they are " + "missing from some requirements. Here is a list of those " + "requirements along with the hashes their downloaded archives " + "actually had. Add lines like these to your requirements files to " + "prevent tampering. (If you did not enable --require-hashes " + "manually, note that it turns on automatically when any package " + "has a hash.)" + ) + + def __init__(self, gotten_hash: str) -> None: """ :param gotten_hash: The hash of the (possibly malicious) archive we just downloaded """ self.gotten_hash = gotten_hash - def body(self): - # type: () -> str + def body(self) -> str: # Dodge circular import. from pip._internal.utils.hashes import FAVORITE_HASH @@ -294,13 +542,16 @@ class HashMissing(HashError): # In the case of URL-based requirements, display the original URL # seen in the requirements file rather than the package name, # so the output can be directly copied into the requirements file. - package = (self.req.original_link if self.req.original_link - # In case someone feeds something downright stupid - # to InstallRequirement's constructor. - else getattr(self.req, 'req', None)) - return ' {} --hash={}:{}'.format(package or 'unknown package', - FAVORITE_HASH, - self.gotten_hash) + package = ( + self.req.original_link + if self.req.is_direct + # In case someone feeds something downright stupid + # to InstallRequirement's constructor. + else getattr(self.req, "req", None) + ) + return " {} --hash={}:{}".format( + package or "unknown package", FAVORITE_HASH, self.gotten_hash + ) class HashUnpinned(HashError): @@ -308,8 +559,10 @@ class HashUnpinned(HashError): version.""" order = 3 - head = ('In --require-hashes mode, all requirements must have their ' - 'versions pinned with ==. These do not:') + head = ( + "In --require-hashes mode, all requirements must have their " + "versions pinned with ==. 
These do not:" + ) class HashMismatch(HashError): @@ -321,14 +574,16 @@ class HashMismatch(HashError): improve its error message. """ - order = 4 - head = ('THESE PACKAGES DO NOT MATCH THE HASHES FROM THE REQUIREMENTS ' - 'FILE. If you have updated the package versions, please update ' - 'the hashes. Otherwise, examine the package contents carefully; ' - 'someone may have tampered with them.') - def __init__(self, allowed, gots): - # type: (Dict[str, List[str]], Dict[str, _Hash]) -> None + order = 4 + head = ( + "THESE PACKAGES DO NOT MATCH THE HASHES FROM THE REQUIREMENTS " + "FILE. If you have updated the package versions, please update " + "the hashes. Otherwise, examine the package contents carefully; " + "someone may have tampered with them." + ) + + def __init__(self, allowed: Dict[str, List[str]], gots: Dict[str, "_Hash"]) -> None: """ :param allowed: A dict of algorithm names pointing to lists of allowed hex digests @@ -338,13 +593,10 @@ class HashMismatch(HashError): self.allowed = allowed self.gots = gots - def body(self): - # type: () -> str - return ' {}:\n{}'.format(self._requirement_name(), - self._hash_comparison()) + def body(self) -> str: + return " {}:\n{}".format(self._requirement_name(), self._hash_comparison()) - def _hash_comparison(self): - # type: () -> str + def _hash_comparison(self) -> str: """ Return a comparison of actual and expected hash values. @@ -355,20 +607,22 @@ class HashMismatch(HashError): Got bcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdef """ - def hash_then_or(hash_name): - # type: (str) -> chain[str] + + def hash_then_or(hash_name: str) -> "chain[str]": # For now, all the decent hashes have 6-char names, so we can get # away with hard-coding space literals. - return chain([hash_name], repeat(' or')) + return chain([hash_name], repeat(" or")) - lines = [] # type: List[str] + lines: List[str] = [] for hash_name, expecteds in self.allowed.items(): prefix = hash_then_or(hash_name) - lines.extend((' Expected {} {}'.format(next(prefix), e)) - for e in expecteds) - lines.append(' Got {}\n'.format( - self.gots[hash_name].hexdigest())) - return '\n'.join(lines) + lines.extend( + (" Expected {} {}".format(next(prefix), e)) for e in expecteds + ) + lines.append( + " Got {}\n".format(self.gots[hash_name].hexdigest()) + ) + return "\n".join(lines) class UnsupportedPythonVersion(InstallationError): @@ -377,21 +631,103 @@ class UnsupportedPythonVersion(InstallationError): class ConfigurationFileCouldNotBeLoaded(ConfigurationError): - """When there are errors while loading a configuration file - """ - - def __init__(self, reason="could not be loaded", fname=None, error=None): - # type: (str, Optional[str], Optional[configparser.Error]) -> None + """When there are errors while loading a configuration file""" + + def __init__( + self, + reason: str = "could not be loaded", + fname: Optional[str] = None, + error: Optional[configparser.Error] = None, + ) -> None: super().__init__(error) self.reason = reason self.fname = fname self.error = error - def __str__(self): - # type: () -> str + def __str__(self) -> str: if self.fname is not None: message_part = f" in {self.fname}." else: assert self.error is not None message_part = f".\n{self.error}\n" return f"Configuration file {self.reason}{message_part}" + + +_DEFAULT_EXTERNALLY_MANAGED_ERROR = f"""\ +The Python environment under {sys.prefix} is managed externally, and may not be +manipulated by the user. Please use specific tooling from the distributor of +the Python installation to interact with this environment instead. 
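Related to the HashMissing and HashMismatch classes above: the ready-to-paste "name --hash=sha256:..." lines that --require-hashes asks for are one hashlib pass over the downloaded archive. A sketch (the requirement and file name are illustrative):

import hashlib

def hash_line(requirement: str, path: str, algorithm: str = "sha256") -> str:
    digest = hashlib.new(algorithm)
    with open(path, "rb") as f:
        # Stream in chunks so large archives do not load fully into memory.
        for chunk in iter(lambda: f.read(8192), b""):
            digest.update(chunk)
    return f"{requirement} --hash={algorithm}:{digest.hexdigest()}"

print(hash_line("example==1.0", "example-1.0.tar.gz"))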
+""" + + +class ExternallyManagedEnvironment(DiagnosticPipError): + """The current environment is externally managed. + + This is raised when the current environment is externally managed, as + defined by `PEP 668`_. The ``EXTERNALLY-MANAGED`` configuration is checked + and displayed when the error is bubbled up to the user. + + :param error: The error message read from ``EXTERNALLY-MANAGED``. + """ + + reference = "externally-managed-environment" + + def __init__(self, error: Optional[str]) -> None: + if error is None: + context = Text(_DEFAULT_EXTERNALLY_MANAGED_ERROR) + else: + context = Text(error) + super().__init__( + message="This environment is externally managed", + context=context, + note_stmt=( + "If you believe this is a mistake, please contact your " + "Python installation or OS distribution provider. " + "You can override this, at the risk of breaking your Python " + "installation or OS, by passing --break-system-packages." + ), + hint_stmt=Text("See PEP 668 for the detailed specification."), + ) + + @staticmethod + def _iter_externally_managed_error_keys() -> Iterator[str]: + # LC_MESSAGES is in POSIX, but not the C standard. The most common + # platform that does not implement this category is Windows, where + # using other categories for console message localization is equally + # unreliable, so we fall back to the locale-less vendor message. This + # can always be re-evaluated when a vendor proposes a new alternative. + try: + category = locale.LC_MESSAGES + except AttributeError: + lang: Optional[str] = None + else: + lang, _ = locale.getlocale(category) + if lang is not None: + yield f"Error-{lang}" + for sep in ("-", "_"): + before, found, _ = lang.partition(sep) + if not found: + continue + yield f"Error-{before}" + yield "Error" + + @classmethod + def from_config( + cls, + config: Union[pathlib.Path, str], + ) -> "ExternallyManagedEnvironment": + parser = configparser.ConfigParser(interpolation=None) + try: + parser.read(config, encoding="utf-8") + section = parser["externally-managed"] + for key in cls._iter_externally_managed_error_keys(): + with contextlib.suppress(KeyError): + return cls(section[key]) + except KeyError: + pass + except (OSError, UnicodeDecodeError, configparser.ParsingError): + from pip._internal.utils._log import VERBOSE + + exc_info = logger.isEnabledFor(VERBOSE) + logger.warning("Failed to read %s", config, exc_info=exc_info) + return cls(None) diff --git a/env/Lib/site-packages/pip/_internal/index/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/index/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 939ce339da6f6dd46dc49dfe7dc6f01388fc675d..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/index/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/index/__pycache__/collector.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/index/__pycache__/collector.cpython-39.pyc deleted file mode 100644 index 27d81be3d6ba5610338b7779be4547135d610ef2..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/index/__pycache__/collector.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/index/__pycache__/package_finder.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/index/__pycache__/package_finder.cpython-39.pyc deleted file mode 100644 index 5dfc01e38d37202e7a989098e24e25fbc388311d..0000000000000000000000000000000000000000 Binary files 
a/env/Lib/site-packages/pip/_internal/index/__pycache__/package_finder.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/index/__pycache__/sources.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/index/__pycache__/sources.cpython-39.pyc deleted file mode 100644 index 95716c84af0063ff8579f014fe617974883333f9..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/index/__pycache__/sources.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/index/collector.py b/env/Lib/site-packages/pip/_internal/index/collector.py index 0721e3683f99139a86895731c8f007c40639776e..b3e293ea3a508dc54674349e845f9794118f548b 100644 --- a/env/Lib/site-packages/pip/_internal/index/collector.py +++ b/env/Lib/site-packages/pip/_internal/index/collector.py @@ -2,30 +2,32 @@ The main purpose of this module is to expose LinkCollector.collect_sources(). """ -import cgi import collections +import email.message import functools -import html import itertools +import json import logging import os -import re import urllib.parse import urllib.request -import xml.etree.ElementTree +from html.parser import HTMLParser from optparse import Values from typing import ( + TYPE_CHECKING, Callable, + Dict, Iterable, List, MutableMapping, NamedTuple, Optional, Sequence, + Tuple, Union, ) -from pip._vendor import html5lib, requests +from pip._vendor import requests from pip._vendor.requests import Response from pip._vendor.requests.exceptions import RetryError, SSLError @@ -35,92 +37,111 @@ from pip._internal.models.search_scope import SearchScope from pip._internal.network.session import PipSession from pip._internal.network.utils import raise_for_status from pip._internal.utils.filetypes import is_archive_file -from pip._internal.utils.misc import pairwise, redact_auth_from_url +from pip._internal.utils.misc import redact_auth_from_url from pip._internal.vcs import vcs from .sources import CandidatesFromPage, LinkSource, build_source +if TYPE_CHECKING: + from typing import Protocol +else: + Protocol = object + logger = logging.getLogger(__name__) -HTMLElement = xml.etree.ElementTree.Element ResponseHeaders = MutableMapping[str, str] -def _match_vcs_scheme(url): - # type: (str) -> Optional[str] +def _match_vcs_scheme(url: str) -> Optional[str]: """Look for VCS schemes in the URL. Returns the matched VCS scheme, or None if there's no match. """ for scheme in vcs.schemes: - if url.lower().startswith(scheme) and url[len(scheme)] in '+:': + if url.lower().startswith(scheme) and url[len(scheme)] in "+:": return scheme return None -class _NotHTML(Exception): - def __init__(self, content_type, request_desc): - # type: (str, str) -> None +class _NotAPIContent(Exception): + def __init__(self, content_type: str, request_desc: str) -> None: super().__init__(content_type, request_desc) self.content_type = content_type self.request_desc = request_desc -def _ensure_html_header(response): - # type: (Response) -> None - """Check the Content-Type header to ensure the response contains HTML. +def _ensure_api_header(response: Response) -> None: + """ + Check the Content-Type header to ensure the response contains a Simple + API Response. - Raises `_NotHTML` if the content type is not text/html. + Raises `_NotAPIContent` if the content type is not a valid content-type. 
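The collector.py hunk below replaces pip's HTML-only page fetching with PEP 691 content negotiation: the client advertises the JSON Simple API first and keeps the two HTML flavors as low-q fallbacks. A minimal standalone sketch of the same negotiation, using plain requests against PyPI rather than pip's PipSession (the constant name here is illustrative, not pip's):

import requests

# q-values rank the acceptable representations; the Content-Type of the
# reply tells the client which parser to use.
PEP_691_ACCEPT = ", ".join(
    [
        "application/vnd.pypi.simple.v1+json",         # preferred: JSON API
        "application/vnd.pypi.simple.v1+html; q=0.1",  # versioned HTML fallback
        "text/html; q=0.01",                           # legacy HTML, last resort
    ]
)

resp = requests.get("https://pypi.org/simple/pip/", headers={"Accept": PEP_691_ACCEPT})
print(resp.headers.get("Content-Type", "Unknown"))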
""" - content_type = response.headers.get("Content-Type", "") - if not content_type.lower().startswith("text/html"): - raise _NotHTML(content_type, response.request.method) + content_type = response.headers.get("Content-Type", "Unknown") + + content_type_l = content_type.lower() + if content_type_l.startswith( + ( + "text/html", + "application/vnd.pypi.simple.v1+html", + "application/vnd.pypi.simple.v1+json", + ) + ): + return + + raise _NotAPIContent(content_type, response.request.method) class _NotHTTP(Exception): pass -def _ensure_html_response(url, session): - # type: (str, PipSession) -> None - """Send a HEAD request to the URL, and ensure the response contains HTML. +def _ensure_api_response(url: str, session: PipSession) -> None: + """ + Send a HEAD request to the URL, and ensure the response contains a simple + API Response. Raises `_NotHTTP` if the URL is not available for a HEAD request, or - `_NotHTML` if the content type is not text/html. + `_NotAPIContent` if the content type is not a valid content type. """ scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url) - if scheme not in {'http', 'https'}: + if scheme not in {"http", "https"}: raise _NotHTTP() resp = session.head(url, allow_redirects=True) raise_for_status(resp) - _ensure_html_header(resp) + _ensure_api_header(resp) -def _get_html_response(url, session): - # type: (str, PipSession) -> Response - """Access an HTML page with GET, and return the response. +def _get_simple_response(url: str, session: PipSession) -> Response: + """Access an Simple API response with GET, and return the response. This consists of three parts: 1. If the URL looks suspiciously like an archive, send a HEAD first to - check the Content-Type is HTML, to avoid downloading a large file. - Raise `_NotHTTP` if the content type cannot be determined, or - `_NotHTML` if it is not HTML. + check the Content-Type is HTML or Simple API, to avoid downloading a + large file. Raise `_NotHTTP` if the content type cannot be determined, or + `_NotAPIContent` if it is not HTML or a Simple API. 2. Actually perform the request. Raise HTTP exceptions on network failures. - 3. Check the Content-Type header to make sure we got HTML, and raise - `_NotHTML` otherwise. + 3. Check the Content-Type header to make sure we got a Simple API response, + and raise `_NotAPIContent` otherwise. """ if is_archive_file(Link(url).filename): - _ensure_html_response(url, session=session) + _ensure_api_response(url, session=session) - logger.debug('Getting page %s', redact_auth_from_url(url)) + logger.debug("Getting page %s", redact_auth_from_url(url)) resp = session.get( url, headers={ - "Accept": "text/html", + "Accept": ", ".join( + [ + "application/vnd.pypi.simple.v1+json", + "application/vnd.pypi.simple.v1+html; q=0.1", + "text/html; q=0.01", + ] + ), # We don't want to blindly returned cached data for # /simple/, because authors generally expecting that # twine upload && pip install will function, but if @@ -142,175 +163,61 @@ def _get_html_response(url, session): # The check for archives above only works if the url ends with # something that looks like an archive. However that is not a # requirement of an url. Unless we issue a HEAD request on every - # url we cannot know ahead of time for sure if something is HTML - # or not. However we can check after we've downloaded it. - _ensure_html_header(resp) + # url we cannot know ahead of time for sure if something is a + # Simple API response or not. However we can check after we've + # downloaded it. 
+ _ensure_api_header(resp) + + logger.debug( + "Fetched page %s as %s", + redact_auth_from_url(url), + resp.headers.get("Content-Type", "Unknown"), + ) return resp -def _get_encoding_from_headers(headers): - # type: (ResponseHeaders) -> Optional[str] - """Determine if we have any encoding information in our headers. - """ +def _get_encoding_from_headers(headers: ResponseHeaders) -> Optional[str]: + """Determine if we have any encoding information in our headers.""" if headers and "Content-Type" in headers: - content_type, params = cgi.parse_header(headers["Content-Type"]) - if "charset" in params: - return params['charset'] + m = email.message.Message() + m["content-type"] = headers["Content-Type"] + charset = m.get_param("charset") + if charset: + return str(charset) return None -def _determine_base_url(document, page_url): - # type: (HTMLElement, str) -> str - """Determine the HTML document's base URL. - - This looks for a ``<base>`` tag in the HTML document. If present, its href - attribute denotes the base URL of anchor tags in the document. If there is - no such tag (or if it does not have a valid href attribute), the HTML - file's URL is used as the base URL. - - :param document: An HTML document representation. The current - implementation expects the result of ``html5lib.parse()``. - :param page_url: The URL of the HTML document. - """ - for base in document.findall(".//base"): - href = base.get("href") - if href is not None: - return href - return page_url - - -def _clean_url_path_part(part): - # type: (str) -> str - """ - Clean a "part" of a URL path (i.e. after splitting on "@" characters). - """ - # We unquote prior to quoting to make sure nothing is double quoted. - return urllib.parse.quote(urllib.parse.unquote(part)) - - -def _clean_file_url_path(part): - # type: (str) -> str - """ - Clean the first part of a URL path that corresponds to a local - filesystem path (i.e. the first part after splitting on "@" characters). - """ - # We unquote prior to quoting to make sure nothing is double quoted. - # Also, on Windows the path part might contain a drive letter which - # should not be quoted. On Linux where drive letters do not - # exist, the colon should be quoted. We rely on urllib.request - # to do the right thing here. - return urllib.request.pathname2url(urllib.request.url2pathname(part)) - - -# percent-encoded: / -_reserved_chars_re = re.compile('(@|%2F)', re.IGNORECASE) - - -def _clean_url_path(path, is_local_path): - # type: (str, bool) -> str - """ - Clean the path portion of a URL. - """ - if is_local_path: - clean_func = _clean_file_url_path - else: - clean_func = _clean_url_path_part - - # Split on the reserved characters prior to cleaning so that - # revision strings in VCS URLs are properly preserved. - parts = _reserved_chars_re.split(path) - - cleaned_parts = [] - for to_clean, reserved in pairwise(itertools.chain(parts, [''])): - cleaned_parts.append(clean_func(to_clean)) - # Normalize %xx escapes (e.g. %2f -> %2F) - cleaned_parts.append(reserved.upper()) - - return ''.join(cleaned_parts) - - -def _clean_link(url): - # type: (str) -> str - """ - Make sure a link is fully quoted. - For example, if ' ' occurs in the URL, it will be replaced with "%20", - and without double-quoting other characters. - """ - # Split the URL into parts according to the general structure - # `scheme://netloc/path;parameters?query#fragment`. - result = urllib.parse.urlparse(url) - # If the netloc is empty, then the URL refers to a local filesystem path. 
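The rewritten _get_encoding_from_headers above swaps the deprecated cgi.parse_header for email.message, which performs the same Content-Type parameter parsing. A quick standalone check of that behavior:

import email.message

# Message.get_param splits header parameters the way cgi.parse_header did;
# it returns None when no charset parameter is present.
m = email.message.Message()
m["content-type"] = 'text/html; charset="ISO-8859-1"'
print(m.get_param("charset"))  # ISO-8859-1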
- is_local_path = not result.netloc - path = _clean_url_path(result.path, is_local_path=is_local_path) - return urllib.parse.urlunparse(result._replace(path=path)) - - -def _create_link_from_element( - anchor, # type: HTMLElement - page_url, # type: str - base_url, # type: str -): - # type: (...) -> Optional[Link] - """ - Convert an anchor element in a simple repository page to a Link. - """ - href = anchor.get("href") - if not href: - return None - - url = _clean_link(urllib.parse.urljoin(base_url, href)) - pyrequire = anchor.get('data-requires-python') - pyrequire = html.unescape(pyrequire) if pyrequire else None - - yanked_reason = anchor.get('data-yanked') - if yanked_reason: - yanked_reason = html.unescape(yanked_reason) - - link = Link( - url, - comes_from=page_url, - requires_python=pyrequire, - yanked_reason=yanked_reason, - ) - - return link - - class CacheablePageContent: - def __init__(self, page): - # type: (HTMLPage) -> None + def __init__(self, page: "IndexContent") -> None: assert page.cache_link_parsing self.page = page - def __eq__(self, other): - # type: (object) -> bool - return (isinstance(other, type(self)) and - self.page.url == other.page.url) + def __eq__(self, other: object) -> bool: + return isinstance(other, type(self)) and self.page.url == other.page.url - def __hash__(self): - # type: () -> int + def __hash__(self) -> int: return hash(self.page.url) -def with_cached_html_pages( - fn, # type: Callable[[HTMLPage], Iterable[Link]] -): - # type: (...) -> Callable[[HTMLPage], List[Link]] +class ParseLinks(Protocol): + def __call__(self, page: "IndexContent") -> Iterable[Link]: + ... + + +def with_cached_index_content(fn: ParseLinks) -> ParseLinks: """ - Given a function that parses an Iterable[Link] from an HTMLPage, cache the - function's result (keyed by CacheablePageContent), unless the HTMLPage + Given a function that parses an Iterable[Link] from an IndexContent, cache the + function's result (keyed by CacheablePageContent), unless the IndexContent `page` has `page.cache_link_parsing == False`. """ @functools.lru_cache(maxsize=None) - def wrapper(cacheable_page): - # type: (CacheablePageContent) -> List[Link] + def wrapper(cacheable_page: CacheablePageContent) -> List[Link]: return list(fn(cacheable_page.page)) @functools.wraps(fn) - def wrapper_wrapper(page): - # type: (HTMLPage) -> List[Link] + def wrapper_wrapper(page: "IndexContent") -> List[Link]: if page.cache_link_parsing: return wrapper(CacheablePageContent(page)) return list(fn(page)) @@ -318,42 +225,46 @@ def with_cached_html_pages( return wrapper_wrapper -@with_cached_html_pages -def parse_links(page): - # type: (HTMLPage) -> Iterable[Link] +@with_cached_index_content +def parse_links(page: "IndexContent") -> Iterable[Link]: """ - Parse an HTML document, and yield its anchor elements as Link objects. + Parse a Simple API's Index Content, and yield its anchor elements as Link objects. 
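The parse_links rewrite below first branches on the negotiated content type; its JSON branch walks the PEP 691 "files" array and hands each entry to Link.from_json. A hedged sketch of the payload shape that branch expects (the project name and URL below are made up):

import json

# Minimal PEP 691 project page: a top-level object whose "files" array has
# one entry per distribution file.
payload = json.loads('''
{
  "meta": {"api-version": "1.0"},
  "name": "example",
  "files": [
    {
      "filename": "example-1.0-py3-none-any.whl",
      "url": "https://files.example.invalid/example-1.0-py3-none-any.whl",
      "requires-python": ">=3.7",
      "hashes": {}
    }
  ]
}
''')
for file in payload.get("files", []):  # the same iteration parse_links uses
    print(file["filename"], file.get("requires-python"))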
""" - document = html5lib.parse( - page.content, - transport_encoding=page.encoding, - namespaceHTMLElements=False, - ) + + content_type_l = page.content_type.lower() + if content_type_l.startswith("application/vnd.pypi.simple.v1+json"): + data = json.loads(page.content) + for file in data.get("files", []): + link = Link.from_json(file, page.url) + if link is None: + continue + yield link + return + + parser = HTMLLinkParser(page.url) + encoding = page.encoding or "utf-8" + parser.feed(page.content.decode(encoding)) url = page.url - base_url = _determine_base_url(document, url) - for anchor in document.findall(".//a"): - link = _create_link_from_element( - anchor, - page_url=url, - base_url=base_url, - ) + base_url = parser.base_url or url + for anchor in parser.anchors: + link = Link.from_element(anchor, page_url=url, base_url=base_url) if link is None: continue yield link -class HTMLPage: - """Represents one page, along with its URL""" +class IndexContent: + """Represents one response (or page), along with its URL""" def __init__( self, - content, # type: bytes - encoding, # type: Optional[str] - url, # type: str - cache_link_parsing=True, # type: bool - ): - # type: (...) -> None + content: bytes, + content_type: str, + encoding: Optional[str], + url: str, + cache_link_parsing: bool = True, + ) -> None: """ :param encoding: the encoding to decode the given content. :param url: the URL from which the HTML was downloaded. @@ -362,90 +273,124 @@ class HTMLPage: have this set to False, for example. """ self.content = content + self.content_type = content_type self.encoding = encoding self.url = url self.cache_link_parsing = cache_link_parsing - def __str__(self): - # type: () -> str + def __str__(self) -> str: return redact_auth_from_url(self.url) -def _handle_get_page_fail( - link, # type: Link - reason, # type: Union[str, Exception] - meth=None # type: Optional[Callable[..., None]] -): - # type: (...) -> None +class HTMLLinkParser(HTMLParser): + """ + HTMLParser that keeps the first base HREF and a list of all anchor + elements' attributes. 
+ """ + + def __init__(self, url: str) -> None: + super().__init__(convert_charrefs=True) + + self.url: str = url + self.base_url: Optional[str] = None + self.anchors: List[Dict[str, Optional[str]]] = [] + + def handle_starttag(self, tag: str, attrs: List[Tuple[str, Optional[str]]]) -> None: + if tag == "base" and self.base_url is None: + href = self.get_href(attrs) + if href is not None: + self.base_url = href + elif tag == "a": + self.anchors.append(dict(attrs)) + + def get_href(self, attrs: List[Tuple[str, Optional[str]]]) -> Optional[str]: + for name, value in attrs: + if name == "href": + return value + return None + + +def _handle_get_simple_fail( + link: Link, + reason: Union[str, Exception], + meth: Optional[Callable[..., None]] = None, +) -> None: if meth is None: meth = logger.debug meth("Could not fetch URL %s: %s - skipping", link, reason) -def _make_html_page(response, cache_link_parsing=True): - # type: (Response, bool) -> HTMLPage +def _make_index_content( + response: Response, cache_link_parsing: bool = True +) -> IndexContent: encoding = _get_encoding_from_headers(response.headers) - return HTMLPage( + return IndexContent( response.content, + response.headers["Content-Type"], encoding=encoding, url=response.url, - cache_link_parsing=cache_link_parsing) - + cache_link_parsing=cache_link_parsing, + ) -def _get_html_page(link, session=None): - # type: (Link, Optional[PipSession]) -> Optional[HTMLPage] - if session is None: - raise TypeError( - "_get_html_page() missing 1 required keyword argument: 'session'" - ) - url = link.url.split('#', 1)[0] +def _get_index_content(link: Link, *, session: PipSession) -> Optional["IndexContent"]: + url = link.url.split("#", 1)[0] # Check for VCS schemes that do not support lookup as web pages. vcs_scheme = _match_vcs_scheme(url) if vcs_scheme: - logger.warning('Cannot look at %s URL %s because it does not support ' - 'lookup as web pages.', vcs_scheme, link) + logger.warning( + "Cannot look at %s URL %s because it does not support lookup as web pages.", + vcs_scheme, + link, + ) return None # Tack index.html onto file:// URLs that point to directories scheme, _, path, _, _, _ = urllib.parse.urlparse(url) - if (scheme == 'file' and os.path.isdir(urllib.request.url2pathname(path))): + if scheme == "file" and os.path.isdir(urllib.request.url2pathname(path)): # add trailing slash if not present so urljoin doesn't trim # final segment - if not url.endswith('/'): - url += '/' - url = urllib.parse.urljoin(url, 'index.html') - logger.debug(' file: URL is directory, getting %s', url) + if not url.endswith("/"): + url += "/" + # TODO: In the future, it would be nice if pip supported PEP 691 + # style responses in the file:// URLs, however there's no + # standard file extension for application/vnd.pypi.simple.v1+json + # so we'll need to come up with something on our own. + url = urllib.parse.urljoin(url, "index.html") + logger.debug(" file: URL is directory, getting %s", url) try: - resp = _get_html_response(url, session=session) + resp = _get_simple_response(url, session=session) except _NotHTTP: logger.warning( - 'Skipping page %s because it looks like an archive, and cannot ' - 'be checked by a HTTP HEAD request.', link, + "Skipping page %s because it looks like an archive, and cannot " + "be checked by a HTTP HEAD request.", + link, ) - except _NotHTML as exc: + except _NotAPIContent as exc: logger.warning( - 'Skipping page %s because the %s request got Content-Type: %s.' 
- 'The only supported Content-Type is text/html', - link, exc.request_desc, exc.content_type, + "Skipping page %s because the %s request got Content-Type: %s. " + "The only supported Content-Types are application/vnd.pypi.simple.v1+json, " + "application/vnd.pypi.simple.v1+html, and text/html", + link, + exc.request_desc, + exc.content_type, ) except NetworkConnectionError as exc: - _handle_get_page_fail(link, exc) + _handle_get_simple_fail(link, exc) except RetryError as exc: - _handle_get_page_fail(link, exc) + _handle_get_simple_fail(link, exc) except SSLError as exc: reason = "There was a problem confirming the ssl certificate: " reason += str(exc) - _handle_get_page_fail(link, reason, meth=logger.info) + _handle_get_simple_fail(link, reason, meth=logger.info) except requests.ConnectionError as exc: - _handle_get_page_fail(link, f"connection error: {exc}") + _handle_get_simple_fail(link, f"connection error: {exc}") except requests.Timeout: - _handle_get_page_fail(link, "timed out") + _handle_get_simple_fail(link, "timed out") else: - return _make_html_page(resp, - cache_link_parsing=link.cache_link_parsing) + return _make_index_content(resp, cache_link_parsing=link.cache_link_parsing) return None @@ -465,16 +410,19 @@ class LinkCollector: def __init__( self, - session, # type: PipSession - search_scope, # type: SearchScope - ): - # type: (...) -> None + session: PipSession, + search_scope: SearchScope, + ) -> None: self.search_scope = search_scope self.session = session @classmethod - def create(cls, session, options, suppress_no_index=False): - # type: (PipSession, Values, bool) -> LinkCollector + def create( + cls, + session: PipSession, + options: Values, + suppress_no_index: bool = False, + ) -> "LinkCollector": """ :param session: The Session to use to make requests. :param suppress_no_index: Whether to ignore the --no-index option @@ -483,8 +431,8 @@ class LinkCollector: index_urls = [options.index_url] + options.extra_index_urls if options.no_index and not suppress_no_index: logger.debug( - 'Ignoring indexes: %s', - ','.join(redact_auth_from_url(url) for url in index_urls), + "Ignoring indexes: %s", + ",".join(redact_auth_from_url(url) for url in index_urls), ) index_urls = [] @@ -492,24 +440,25 @@ class LinkCollector: find_links = options.find_links or [] search_scope = SearchScope.create( - find_links=find_links, index_urls=index_urls, + find_links=find_links, + index_urls=index_urls, + no_index=options.no_index, ) link_collector = LinkCollector( - session=session, search_scope=search_scope, + session=session, + search_scope=search_scope, ) return link_collector @property - def find_links(self): - # type: () -> List[str] + def find_links(self) -> List[str]: return self.search_scope.find_links - def fetch_page(self, location): - # type: (Link) -> Optional[HTMLPage] + def fetch_response(self, location: Link) -> Optional[IndexContent]: """ Fetch an HTML page containing package links. 
""" - return _get_html_page(location, session=self.session) + return _get_index_content(location, session=self.session) def collect_sources( self, diff --git a/env/Lib/site-packages/pip/_internal/index/package_finder.py b/env/Lib/site-packages/pip/_internal/index/package_finder.py index 7f2e04e7c37fa8c87af9f1c32f24eb3d2be84129..b6f8d57e854b77f60c04f59a7f3ff74476a5f5d6 100644 --- a/env/Lib/site-packages/pip/_internal/index/package_finder.py +++ b/env/Lib/site-packages/pip/_internal/index/package_finder.py @@ -1,13 +1,11 @@ """Routines related to PyPI, indexes""" -# The following comment should be removed at some point in the future. -# mypy: strict-optional=False - +import enum import functools import itertools import logging import re -from typing import FrozenSet, Iterable, List, Optional, Set, Tuple, Union +from typing import TYPE_CHECKING, FrozenSet, Iterable, List, Optional, Set, Tuple, Union from pip._vendor.packaging import specifiers from pip._vendor.packaging.tags import Tag @@ -30,31 +28,31 @@ from pip._internal.models.selection_prefs import SelectionPreferences from pip._internal.models.target_python import TargetPython from pip._internal.models.wheel import Wheel from pip._internal.req import InstallRequirement +from pip._internal.utils._log import getLogger from pip._internal.utils.filetypes import WHEEL_EXTENSION from pip._internal.utils.hashes import Hashes from pip._internal.utils.logging import indent_log from pip._internal.utils.misc import build_netloc from pip._internal.utils.packaging import check_requires_python from pip._internal.utils.unpacking import SUPPORTED_EXTENSIONS -from pip._internal.utils.urls import url_to_path -__all__ = ['FormatControl', 'BestCandidateResult', 'PackageFinder'] +if TYPE_CHECKING: + from pip._vendor.typing_extensions import TypeGuard + +__all__ = ["FormatControl", "BestCandidateResult", "PackageFinder"] -logger = logging.getLogger(__name__) +logger = getLogger(__name__) BuildTag = Union[Tuple[()], Tuple[int, str]] -CandidateSortingKey = ( - Tuple[int, int, int, _BaseVersion, Optional[int], BuildTag] -) +CandidateSortingKey = Tuple[int, int, int, _BaseVersion, Optional[int], BuildTag] def _check_link_requires_python( - link, # type: Link - version_info, # type: Tuple[int, int, int] - ignore_requires_python=False, # type: bool -): - # type: (...) -> bool + link: Link, + version_info: Tuple[int, int, int], + ignore_requires_python: bool = False, +) -> bool: """ Return whether the given Python version is compatible with a link's "Requires-Python" value. 
@@ -66,39 +64,54 @@ def _check_link_requires_python( """ try: is_compatible = check_requires_python( - link.requires_python, version_info=version_info, + link.requires_python, + version_info=version_info, ) except specifiers.InvalidSpecifier: logger.debug( "Ignoring invalid Requires-Python (%r) for link: %s", - link.requires_python, link, + link.requires_python, + link, ) else: if not is_compatible: - version = '.'.join(map(str, version_info)) + version = ".".join(map(str, version_info)) if not ignore_requires_python: - logger.debug( - 'Link requires a different Python (%s not in: %r): %s', - version, link.requires_python, link, + logger.verbose( + "Link requires a different Python (%s not in: %r): %s", + version, + link.requires_python, + link, ) return False logger.debug( - 'Ignoring failed Requires-Python check (%s not in: %r) ' - 'for link: %s', - version, link.requires_python, link, + "Ignoring failed Requires-Python check (%s not in: %r) for link: %s", + version, + link.requires_python, + link, ) return True +class LinkType(enum.Enum): + candidate = enum.auto() + different_project = enum.auto() + yanked = enum.auto() + format_unsupported = enum.auto() + format_invalid = enum.auto() + platform_mismatch = enum.auto() + requires_python_mismatch = enum.auto() + + class LinkEvaluator: """ Responsible for evaluating links for a particular project. """ - _py_version_re = re.compile(r'-py([123]\.?[0-9]?)$') + _py_version_re = re.compile(r"-py([123]\.?[0-9]?)$") # Don't include an allow_yanked default value to make sure each call # site considers whether yanked releases are allowed. This also causes @@ -106,14 +119,13 @@ class LinkEvaluator: # people when reading the code. def __init__( self, - project_name, # type: str - canonical_name, # type: str - formats, # type: FrozenSet[str] - target_python, # type: TargetPython - allow_yanked, # type: bool - ignore_requires_python=None, # type: Optional[bool] - ): - # type: (...) -> None + project_name: str, + canonical_name: str, + formats: FrozenSet[str], + target_python: TargetPython, + allow_yanked: bool, + ignore_requires_python: Optional[bool] = None, + ) -> None: """ :param project_name: The user supplied package name. :param canonical_name: The canonical package name. @@ -142,20 +154,20 @@ class LinkEvaluator: self.project_name = project_name - def evaluate_link(self, link): - # type: (Link) -> Tuple[bool, Optional[str]] + def evaluate_link(self, link: Link) -> Tuple[LinkType, str]: """ Determine whether a link is a candidate for installation. - :return: A tuple (is_candidate, result), where `result` is (1) a - version string if `is_candidate` is True, and (2) if - `is_candidate` is False, an optional string to log the reason - the link fails to qualify. + :return: A tuple (result, detail), where *result* is an enum + representing whether the evaluation found a candidate, or the reason + why one is not found. If a candidate is found, *detail* will be the + candidate's version string; if one is not found, it contains the + reason the link fails to qualify. 
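The Requires-Python handling above ultimately defers to check_requires_python, which matches the interpreter's version triple against the link's PEP 440 specifier. A standalone sketch of that core test (is_python_compatible is a hypothetical stand-in, using pip's vendored packaging):

from typing import Tuple

from pip._vendor.packaging.specifiers import SpecifierSet
from pip._vendor.packaging.version import Version

def is_python_compatible(requires_python: str, version_info: Tuple[int, int, int]) -> bool:
    # Build "3.9.7" from the triple and test it against the specifier set;
    # pip's wrapper additionally surfaces InvalidSpecifier to the caller.
    return Version(".".join(map(str, version_info))) in SpecifierSet(requires_python)

print(is_python_compatible(">=3.7", (3, 9, 7)))   # True
print(is_python_compatible(">=3.10", (3, 9, 7)))  # False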
""" version = None if link.is_yanked and not self._allow_yanked: - reason = link.yanked_reason or '<none given>' - return (False, f'yanked for reason: {reason}') + reason = link.yanked_reason or "<none given>" + return (LinkType.yanked, f"yanked for reason: {reason}") if link.egg_fragment: egg_info = link.egg_fragment @@ -163,80 +175,85 @@ class LinkEvaluator: else: egg_info, ext = link.splitext() if not ext: - return (False, 'not a file') + return (LinkType.format_unsupported, "not a file") if ext not in SUPPORTED_EXTENSIONS: - return (False, f'unsupported archive format: {ext}') + return ( + LinkType.format_unsupported, + f"unsupported archive format: {ext}", + ) if "binary" not in self._formats and ext == WHEEL_EXTENSION: - reason = 'No binaries permitted for {}'.format( - self.project_name) - return (False, reason) - if "macosx10" in link.path and ext == '.zip': - return (False, 'macosx10 one') + reason = f"No binaries permitted for {self.project_name}" + return (LinkType.format_unsupported, reason) + if "macosx10" in link.path and ext == ".zip": + return (LinkType.format_unsupported, "macosx10 one") if ext == WHEEL_EXTENSION: try: wheel = Wheel(link.filename) except InvalidWheelFilename: - return (False, 'invalid wheel filename') + return ( + LinkType.format_invalid, + "invalid wheel filename", + ) if canonicalize_name(wheel.name) != self._canonical_name: - reason = 'wrong project name (not {})'.format( - self.project_name) - return (False, reason) + reason = f"wrong project name (not {self.project_name})" + return (LinkType.different_project, reason) supported_tags = self._target_python.get_tags() if not wheel.supported(supported_tags): # Include the wheel's tags in the reason string to # simplify troubleshooting compatibility issues. - file_tags = wheel.get_formatted_file_tags() + file_tags = ", ".join(wheel.get_formatted_file_tags()) reason = ( - "none of the wheel's tags ({}) are compatible " - "(run pip debug --verbose to show compatible tags)".format( - ', '.join(file_tags) - ) + f"none of the wheel's tags ({file_tags}) are compatible " + f"(run pip debug --verbose to show compatible tags)" ) - return (False, reason) + return (LinkType.platform_mismatch, reason) version = wheel.version # This should be up by the self.ok_binary check, but see issue 2700. if "source" not in self._formats and ext != WHEEL_EXTENSION: - reason = f'No sources permitted for {self.project_name}' - return (False, reason) + reason = f"No sources permitted for {self.project_name}" + return (LinkType.format_unsupported, reason) if not version: version = _extract_version_from_fragment( - egg_info, self._canonical_name, + egg_info, + self._canonical_name, ) if not version: - reason = f'Missing project version for {self.project_name}' - return (False, reason) + reason = f"Missing project version for {self.project_name}" + return (LinkType.format_invalid, reason) match = self._py_version_re.search(version) if match: - version = version[:match.start()] + version = version[: match.start()] py_version = match.group(1) if py_version != self._target_python.py_version: - return (False, 'Python version is incorrect') + return ( + LinkType.platform_mismatch, + "Python version is incorrect", + ) supports_python = _check_link_requires_python( - link, version_info=self._target_python.py_version_info, + link, + version_info=self._target_python.py_version_info, ignore_requires_python=self._ignore_requires_python, ) if not supports_python: - # Return None for the reason text to suppress calling - # _log_skipped_link(). 
- return (False, None) + reason = f"{version} Requires-Python {link.requires_python}" + return (LinkType.requires_python_mismatch, reason) - logger.debug('Found link %s, version: %s', link, version) + logger.debug("Found link %s, version: %s", link, version) - return (True, version) + return (LinkType.candidate, version) def filter_unallowed_hashes( - candidates, # type: List[InstallationCandidate] - hashes, # type: Hashes - project_name, # type: str -): - # type: (...) -> List[InstallationCandidate] + candidates: List[InstallationCandidate], + hashes: Optional[Hashes], + project_name: str, +) -> List[InstallationCandidate]: """ Filter out candidates whose hashes aren't allowed, and return a new list of candidates. @@ -254,8 +271,8 @@ def filter_unallowed_hashes( """ if not hashes: logger.debug( - 'Given no hashes to check %s links for project %r: ' - 'discarding no candidates', + "Given no hashes to check %s links for project %r: " + "discarding no candidates", len(candidates), project_name, ) @@ -285,22 +302,22 @@ def filter_unallowed_hashes( filtered = list(candidates) if len(filtered) == len(candidates): - discard_message = 'discarding no candidates' + discard_message = "discarding no candidates" else: - discard_message = 'discarding {} non-matches:\n {}'.format( + discard_message = "discarding {} non-matches:\n {}".format( len(non_matches), - '\n '.join(str(candidate.link) for candidate in non_matches) + "\n ".join(str(candidate.link) for candidate in non_matches), ) logger.debug( - 'Checked %s links for project %r against %s hashes ' - '(%s matches, %s no digest): %s', + "Checked %s links for project %r against %s hashes " + "(%s matches, %s no digest): %s", len(candidates), project_name, hashes.digest_count, match_count, len(matches_or_no_digest) - match_count, - discard_message + discard_message, ) return filtered @@ -315,10 +332,9 @@ class CandidatePreferences: def __init__( self, - prefer_binary=False, # type: bool - allow_all_prereleases=False, # type: bool - ): - # type: (...) -> None + prefer_binary: bool = False, + allow_all_prereleases: bool = False, + ) -> None: """ :param allow_all_prereleases: Whether to allow all pre-releases. """ @@ -335,11 +351,10 @@ class BestCandidateResult: def __init__( self, - candidates, # type: List[InstallationCandidate] - applicable_candidates, # type: List[InstallationCandidate] - best_candidate, # type: Optional[InstallationCandidate] - ): - # type: (...) -> None + candidates: List[InstallationCandidate], + applicable_candidates: List[InstallationCandidate], + best_candidate: Optional[InstallationCandidate], + ) -> None: """ :param candidates: A sequence of all available candidates found. :param applicable_candidates: The applicable candidates. @@ -358,16 +373,12 @@ class BestCandidateResult: self.best_candidate = best_candidate - def iter_all(self): - # type: () -> Iterable[InstallationCandidate] - """Iterate through all candidates. - """ + def iter_all(self) -> Iterable[InstallationCandidate]: + """Iterate through all candidates.""" return iter(self._candidates) - def iter_applicable(self): - # type: () -> Iterable[InstallationCandidate] - """Iterate through the applicable candidates. 
- """ + def iter_applicable(self) -> Iterable[InstallationCandidate]: + """Iterate through the applicable candidates.""" return iter(self._applicable_candidates) @@ -381,14 +392,13 @@ class CandidateEvaluator: @classmethod def create( cls, - project_name, # type: str - target_python=None, # type: Optional[TargetPython] - prefer_binary=False, # type: bool - allow_all_prereleases=False, # type: bool - specifier=None, # type: Optional[specifiers.BaseSpecifier] - hashes=None, # type: Optional[Hashes] - ): - # type: (...) -> CandidateEvaluator + project_name: str, + target_python: Optional[TargetPython] = None, + prefer_binary: bool = False, + allow_all_prereleases: bool = False, + specifier: Optional[specifiers.BaseSpecifier] = None, + hashes: Optional[Hashes] = None, + ) -> "CandidateEvaluator": """Create a CandidateEvaluator object. :param target_python: The target Python interpreter to use when @@ -417,14 +427,13 @@ class CandidateEvaluator: def __init__( self, - project_name, # type: str - supported_tags, # type: List[Tag] - specifier, # type: specifiers.BaseSpecifier - prefer_binary=False, # type: bool - allow_all_prereleases=False, # type: bool - hashes=None, # type: Optional[Hashes] - ): - # type: (...) -> None + project_name: str, + supported_tags: List[Tag], + specifier: specifiers.BaseSpecifier, + prefer_binary: bool = False, + allow_all_prereleases: bool = False, + hashes: Optional[Hashes] = None, + ) -> None: """ :param supported_tags: The PEP 425 tags supported by the target Python in order of preference (most preferred first). @@ -444,9 +453,8 @@ class CandidateEvaluator: def get_applicable_candidates( self, - candidates, # type: List[InstallationCandidate] - ): - # type: (...) -> List[InstallationCandidate] + candidates: List[InstallationCandidate], + ) -> List[InstallationCandidate]: """ Return the applicable candidates from a list of candidates. """ @@ -454,7 +462,8 @@ class CandidateEvaluator: allow_prereleases = self._allow_all_prereleases or None specifier = self._specifier versions = { - str(v) for v in specifier.filter( + str(v) + for v in specifier.filter( # We turn the version object into a str here because otherwise # when we're debundled but setuptools isn't, Python will see # packaging.version.Version and @@ -468,9 +477,7 @@ class CandidateEvaluator: } # Again, converting version to str to deal with debundling. - applicable_candidates = [ - c for c in candidates if str(c.version) in versions - ] + applicable_candidates = [c for c in candidates if str(c.version) in versions] filtered_applicable_candidates = filter_unallowed_hashes( candidates=applicable_candidates, @@ -480,8 +487,7 @@ class CandidateEvaluator: return sorted(filtered_applicable_candidates, key=self._sort_key) - def _sort_key(self, candidate): - # type: (InstallationCandidate) -> CandidateSortingKey + def _sort_key(self, candidate: InstallationCandidate) -> CandidateSortingKey: """ Function to pass as the `key` argument to a call to sorted() to sort InstallationCandidates by preference. 
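The sort key assembled in the hunk below works purely through Python's lexicographic tuple comparison: earlier fields dominate, later ones only break ties. A toy illustration with hand-built prefixes of the real key (the full tuple also carries the wheel tag priority and build tag):

from pip._vendor.packaging.version import parse as parse_version

# (has_allowed_hash, yank_value, binary_preference, version)
keys = [
    (1, 0, 0, parse_version("1.0")),   # hash allowed, not yanked
    (0, 0, 0, parse_version("2.0")),   # newer, but its hash is not allowed
    (1, -1, 0, parse_version("3.0")),  # newest, but yanked (yank_value = -1)
]
# Picking the maximum mirrors how the best candidate is selected: trust
# (allowed hash, not yanked) beats recency.
print(max(keys))  # (1, 0, 0, <Version('1.0')>)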
@@ -513,16 +519,18 @@ class CandidateEvaluator: """ valid_tags = self._supported_tags support_num = len(valid_tags) - build_tag = () # type: BuildTag + build_tag: BuildTag = () binary_preference = 0 link = candidate.link if link.is_wheel: # can raise InvalidWheelFilename wheel = Wheel(link.filename) try: - pri = -(wheel.find_most_preferred_tag( - valid_tags, self._wheel_tag_preferences - )) + pri = -( + wheel.find_most_preferred_tag( + valid_tags, self._wheel_tag_preferences + ) + ) except ValueError: raise UnsupportedWheel( "{} is not a supported wheel for this platform. It " @@ -531,7 +539,8 @@ class CandidateEvaluator: if self._prefer_binary: binary_preference = 1 if wheel.build_tag is not None: - match = re.match(r'^(\d+)(.*)$', wheel.build_tag) + match = re.match(r"^(\d+)(.*)$", wheel.build_tag) + assert match is not None, "guaranteed by filename validation" build_tag_groups = match.groups() build_tag = (int(build_tag_groups[0]), build_tag_groups[1]) else: # sdist @@ -539,15 +548,18 @@ class CandidateEvaluator: has_allowed_hash = int(link.is_hash_allowed(self._hashes)) yank_value = -1 * int(link.is_yanked) # -1 for yanked. return ( - has_allowed_hash, yank_value, binary_preference, candidate.version, - pri, build_tag, + has_allowed_hash, + yank_value, + binary_preference, + candidate.version, + pri, + build_tag, ) def sort_best_candidate( self, - candidates, # type: List[InstallationCandidate] - ): - # type: (...) -> Optional[InstallationCandidate] + candidates: List[InstallationCandidate], + ) -> Optional[InstallationCandidate]: """ Return the best candidate per the instance's sort order, or None if no candidate is acceptable. @@ -559,9 +571,8 @@ class CandidateEvaluator: def compute_best_candidate( self, - candidates, # type: List[InstallationCandidate] - ): - # type: (...) -> BestCandidateResult + candidates: List[InstallationCandidate], + ) -> BestCandidateResult: """ Compute and return a `BestCandidateResult` instance. """ @@ -585,14 +596,13 @@ class PackageFinder: def __init__( self, - link_collector, # type: LinkCollector - target_python, # type: TargetPython - allow_yanked, # type: bool - format_control=None, # type: Optional[FormatControl] - candidate_prefs=None, # type: CandidatePreferences - ignore_requires_python=None, # type: Optional[bool] - ): - # type: (...) -> None + link_collector: LinkCollector, + target_python: TargetPython, + allow_yanked: bool, + format_control: Optional[FormatControl] = None, + candidate_prefs: Optional[CandidatePreferences] = None, + ignore_requires_python: Optional[bool] = None, + ) -> None: """ This constructor is primarily meant to be used by the create() class method and from tests. @@ -617,7 +627,7 @@ class PackageFinder: self.format_control = format_control # These are boring links that have already been logged somehow. - self._logged_links = set() # type: Set[Link] + self._logged_links: Set[Tuple[Link, LinkType, str]] = set() # Don't include an allow_yanked default value to make sure each call # site considers whether yanked releases are allowed. This also causes @@ -626,11 +636,10 @@ class PackageFinder: @classmethod def create( cls, - link_collector, # type: LinkCollector - selection_prefs, # type: SelectionPreferences - target_python=None, # type: Optional[TargetPython] - ): - # type: (...) -> PackageFinder + link_collector: LinkCollector, + selection_prefs: SelectionPreferences, + target_python: Optional[TargetPython] = None, + ) -> "PackageFinder": """Create a PackageFinder. 
:param selection_prefs: The candidate selection preferences, as a @@ -657,56 +666,53 @@ class PackageFinder: ) @property - def target_python(self): - # type: () -> TargetPython + def target_python(self) -> TargetPython: return self._target_python @property - def search_scope(self): - # type: () -> SearchScope + def search_scope(self) -> SearchScope: return self._link_collector.search_scope @search_scope.setter - def search_scope(self, search_scope): - # type: (SearchScope) -> None + def search_scope(self, search_scope: SearchScope) -> None: self._link_collector.search_scope = search_scope @property - def find_links(self): - # type: () -> List[str] + def find_links(self) -> List[str]: return self._link_collector.find_links @property - def index_urls(self): - # type: () -> List[str] + def index_urls(self) -> List[str]: return self.search_scope.index_urls @property - def trusted_hosts(self): - # type: () -> Iterable[str] + def trusted_hosts(self) -> Iterable[str]: for host_port in self._link_collector.session.pip_trusted_origins: yield build_netloc(*host_port) @property - def allow_all_prereleases(self): - # type: () -> bool + def allow_all_prereleases(self) -> bool: return self._candidate_prefs.allow_all_prereleases - def set_allow_all_prereleases(self): - # type: () -> None + def set_allow_all_prereleases(self) -> None: self._candidate_prefs.allow_all_prereleases = True @property - def prefer_binary(self): - # type: () -> bool + def prefer_binary(self) -> bool: return self._candidate_prefs.prefer_binary - def set_prefer_binary(self): - # type: () -> None + def set_prefer_binary(self) -> None: self._candidate_prefs.prefer_binary = True - def make_link_evaluator(self, project_name): - # type: (str) -> LinkEvaluator + def requires_python_skipped_reasons(self) -> List[str]: + reasons = { + detail + for _, result, detail in self._logged_links + if result == LinkType.requires_python_mismatch + } + return sorted(reasons) + + def make_link_evaluator(self, project_name: str) -> LinkEvaluator: canonical_name = canonicalize_name(project_name) formats = self.format_control.get_allowed_formats(canonical_name) @@ -719,14 +725,13 @@ class PackageFinder: ignore_requires_python=self._ignore_requires_python, ) - def _sort_links(self, links): - # type: (Iterable[Link]) -> List[Link] + def _sort_links(self, links: Iterable[Link]) -> List[Link]: """ Returns elements of links in order, non-egg links first, egg links second, while eliminating duplicates """ eggs, no_eggs = [], [] - seen = set() # type: Set[Link] + seen: Set[Link] = set() for link in links: if link not in seen: seen.add(link) @@ -736,34 +741,35 @@ class PackageFinder: no_eggs.append(link) return no_eggs + eggs - def _log_skipped_link(self, link, reason): - # type: (Link, str) -> None - if link not in self._logged_links: + def _log_skipped_link(self, link: Link, result: LinkType, detail: str) -> None: + entry = (link, result, detail) + if entry not in self._logged_links: # Put the link at the end so the reason is more visible and because # the link string is usually very long. 
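Because _logged_links now stores full (link, result, detail) triples, requires_python_skipped_reasons above can recover the distinct skip messages after the fact. A small sketch of that set comprehension, with the enum abbreviated and links reduced to strings:

from enum import Enum, auto

class LinkType(Enum):  # abbreviated mirror of the enum introduced above
    candidate = auto()
    requires_python_mismatch = auto()

logged = {
    ("link-a", LinkType.requires_python_mismatch, "1.0 Requires-Python >=3.8"),
    ("link-b", LinkType.requires_python_mismatch, "1.0 Requires-Python >=3.8"),
    ("link-c", LinkType.candidate, "2.0"),  # for candidates, detail is the version
}
# Two links share one reason; the set comprehension collapses them.
reasons = sorted(
    {detail for _, result, detail in logged if result == LinkType.requires_python_mismatch}
)
print(reasons)  # ['1.0 Requires-Python >=3.8']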
- logger.debug('Skipping link: %s: %s', reason, link) - self._logged_links.add(link) + logger.debug("Skipping link: %s: %s", detail, link) + self._logged_links.add(entry) - def get_install_candidate(self, link_evaluator, link): - # type: (LinkEvaluator, Link) -> Optional[InstallationCandidate] + def get_install_candidate( + self, link_evaluator: LinkEvaluator, link: Link + ) -> Optional[InstallationCandidate]: """ If the link is a candidate for install, convert it to an InstallationCandidate and return it. Otherwise, return None. """ - is_candidate, result = link_evaluator.evaluate_link(link) - if not is_candidate: - if result: - self._log_skipped_link(link, reason=result) + result, detail = link_evaluator.evaluate_link(link) + if result != LinkType.candidate: + self._log_skipped_link(link, result, detail) return None return InstallationCandidate( name=link_evaluator.project_name, link=link, - version=result, + version=detail, ) - def evaluate_links(self, link_evaluator, links): - # type: (LinkEvaluator, Iterable[Link]) -> List[InstallationCandidate] + def evaluate_links( + self, link_evaluator: LinkEvaluator, links: Iterable[Link] + ) -> List[InstallationCandidate]: """ Convert links that are candidates to InstallationCandidate objects. """ @@ -775,16 +781,18 @@ class PackageFinder: return candidates - def process_project_url(self, project_url, link_evaluator): - # type: (Link, LinkEvaluator) -> List[InstallationCandidate] + def process_project_url( + self, project_url: Link, link_evaluator: LinkEvaluator + ) -> List[InstallationCandidate]: logger.debug( - 'Fetching project page and analyzing links: %s', project_url, + "Fetching project page and analyzing links: %s", + project_url, ) - html_page = self._link_collector.fetch_page(project_url) - if html_page is None: + index_response = self._link_collector.fetch_response(project_url) + if index_response is None: return [] - page_links = list(parse_links(html_page)) + page_links = list(parse_links(index_response)) with indent_log(): package_links = self.evaluate_links( @@ -795,8 +803,7 @@ class PackageFinder: return package_links @functools.lru_cache(maxsize=None) - def find_all_candidates(self, project_name): - # type: (str) -> List[InstallationCandidate] + def find_all_candidates(self, project_name: str) -> List[InstallationCandidate]: """Find all available InstallationCandidate for project_name This checks index_urls and find_links. @@ -835,7 +842,14 @@ class PackageFinder: ) if logger.isEnabledFor(logging.DEBUG) and file_candidates: - paths = [url_to_path(c.link.url) for c in file_candidates] + paths = [] + for candidate in file_candidates: + assert candidate.link.url # we need to have a URL + try: + paths.append(candidate.link.file_path) + except Exception: + paths.append(candidate.link.url) # it's not a local file + logger.debug("Local files found: %s", ", ".join(paths)) # This is an intentional priority ordering @@ -843,13 +857,11 @@ class PackageFinder: def make_candidate_evaluator( self, - project_name, # type: str - specifier=None, # type: Optional[specifiers.BaseSpecifier] - hashes=None, # type: Optional[Hashes] - ): - # type: (...) -> CandidateEvaluator - """Create a CandidateEvaluator object to use. 
- """ + project_name: str, + specifier: Optional[specifiers.BaseSpecifier] = None, + hashes: Optional[Hashes] = None, + ) -> CandidateEvaluator: + """Create a CandidateEvaluator object to use.""" candidate_prefs = self._candidate_prefs return CandidateEvaluator.create( project_name=project_name, @@ -863,11 +875,10 @@ class PackageFinder: @functools.lru_cache(maxsize=None) def find_best_candidate( self, - project_name, # type: str - specifier=None, # type: Optional[specifiers.BaseSpecifier] - hashes=None, # type: Optional[Hashes] - ): - # type: (...) -> BestCandidateResult + project_name: str, + specifier: Optional[specifiers.BaseSpecifier] = None, + hashes: Optional[Hashes] = None, + ) -> BestCandidateResult: """Find matches for the given project and specifier. :param specifier: An optional object implementing `filter` @@ -884,8 +895,9 @@ class PackageFinder: ) return candidate_evaluator.compute_best_candidate(candidates) - def find_requirement(self, req, upgrade): - # type: (InstallRequirement, bool) -> Optional[InstallationCandidate] + def find_requirement( + self, req: InstallRequirement, upgrade: bool + ) -> Optional[InstallationCandidate]: """Try to find a Link matching req Expects req, an InstallRequirement and upgrade, a boolean @@ -894,80 +906,86 @@ class PackageFinder: """ hashes = req.hashes(trust_internet=False) best_candidate_result = self.find_best_candidate( - req.name, specifier=req.specifier, hashes=hashes, + req.name, + specifier=req.specifier, + hashes=hashes, ) best_candidate = best_candidate_result.best_candidate - installed_version = None # type: Optional[_BaseVersion] + installed_version: Optional[_BaseVersion] = None if req.satisfied_by is not None: - installed_version = parse_version(req.satisfied_by.version) + installed_version = req.satisfied_by.version - def _format_versions(cand_iter): - # type: (Iterable[InstallationCandidate]) -> str + def _format_versions(cand_iter: Iterable[InstallationCandidate]) -> str: # This repeated parse_version and str() conversion is needed to # handle different vendoring sources from pip and pkg_resources. # If we stop using the pkg_resources provided specifier and start # using our own, we can drop the cast to str(). 
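The str()/parse_version round-trip in _format_versions is not only about debundled vendoring; keying the sort on parse_version also yields numeric rather than lexicographic ordering. A quick demonstration, assuming pip's vendored packaging is importable:

from pip._vendor.packaging.version import parse as parse_version

versions = {"1.0", "1.0rc1", "9.0", "10.0"}
print(sorted(versions))                     # ['1.0', '1.0rc1', '10.0', '9.0']
print(sorted(versions, key=parse_version))  # ['1.0rc1', '1.0', '9.0', '10.0']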
- return ", ".join(sorted( - {str(c.version) for c in cand_iter}, - key=parse_version, - )) or "none" + return ( + ", ".join( + sorted( + {str(c.version) for c in cand_iter}, + key=parse_version, + ) + ) + or "none" + ) if installed_version is None and best_candidate is None: logger.critical( - 'Could not find a version that satisfies the requirement %s ' - '(from versions: %s)', + "Could not find a version that satisfies the requirement %s " + "(from versions: %s)", req, _format_versions(best_candidate_result.iter_all()), ) raise DistributionNotFound( - 'No matching distribution found for {}'.format( - req) + "No matching distribution found for {}".format(req) ) - best_installed = False - if installed_version and ( - best_candidate is None or - best_candidate.version <= installed_version): - best_installed = True + def _should_install_candidate( + candidate: Optional[InstallationCandidate], + ) -> "TypeGuard[InstallationCandidate]": + if installed_version is None: + return True + if best_candidate is None: + return False + return best_candidate.version > installed_version if not upgrade and installed_version is not None: - if best_installed: + if _should_install_candidate(best_candidate): logger.debug( - 'Existing installed version (%s) is most up-to-date and ' - 'satisfies requirement', + "Existing installed version (%s) satisfies requirement " + "(most up-to-date version is %s)", installed_version, + best_candidate.version, ) else: logger.debug( - 'Existing installed version (%s) satisfies requirement ' - '(most up-to-date version is %s)', + "Existing installed version (%s) is most up-to-date and " + "satisfies requirement", installed_version, - best_candidate.version, ) return None - if best_installed: - # We have an existing version, and its the best version + if _should_install_candidate(best_candidate): logger.debug( - 'Installed version (%s) is most up-to-date (past versions: ' - '%s)', - installed_version, + "Using version %s (newest of versions: %s)", + best_candidate.version, _format_versions(best_candidate_result.iter_applicable()), ) - raise BestVersionAlreadyInstalled + return best_candidate + # We have an existing version, and it's the best version logger.debug( - 'Using version %s (newest of versions: %s)', - best_candidate.version, + "Installed version (%s) is most up-to-date (past versions: %s)", + installed_version, _format_versions(best_candidate_result.iter_applicable()), ) - return best_candidate + raise BestVersionAlreadyInstalled -def _find_name_version_sep(fragment, canonical_name): - # type: (str, str) -> int +def _find_name_version_sep(fragment: str, canonical_name: str) -> int: """Find the separator's index based on the package's canonical name. :param fragment: A <package>+<version> filename "fragment" (stem) or @@ -993,8 +1011,7 @@ def _find_name_version_sep(fragment, canonical_name): raise ValueError(f"{fragment} does not match {canonical_name}") -def _extract_version_from_fragment(fragment, canonical_name): - # type: (str, str) -> Optional[str] +def _extract_version_from_fragment(fragment: str, canonical_name: str) -> Optional[str]: """Parse the version string from a <package>+<version> filename "fragment" (stem) or egg fragment.
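_find_name_version_sep and _extract_version_from_fragment, shown above, split stems like "pip-21.1.3" where the project name itself may contain dashes. A hedged re-implementation of the idea (split_fragment is a hypothetical stand-in, not pip's helper, which also handles further edge cases):

from pip._vendor.packaging.utils import canonicalize_name

def split_fragment(fragment: str, canonical_name: str) -> str:
    # Try each dash as the name/version separator; keep the first one whose
    # left side canonicalizes to the expected project name.
    for i, char in enumerate(fragment):
        if char != "-":
            continue
        if canonicalize_name(fragment[:i]) == canonical_name:
            return fragment[i + 1 :]
    raise ValueError(f"{fragment} does not match {canonical_name}")

print(split_fragment("pip-21.1.3", "pip"))             # 21.1.3
print(split_fragment("My_Package-1.0", "my-package"))  # 1.0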
diff --git a/env/Lib/site-packages/pip/_internal/index/sources.py b/env/Lib/site-packages/pip/_internal/index/sources.py index eec3f12f7e394a9eba2ebc43cf754a0040cdebf3..cd9cb8d40f135d1da7d2517630816605a0805fe7 100644 --- a/env/Lib/site-packages/pip/_internal/index/sources.py +++ b/env/Lib/site-packages/pip/_internal/index/sources.py @@ -171,7 +171,6 @@ def build_source( expand_dir: bool, cache_link_parsing: bool, ) -> Tuple[Optional[str], Optional[LinkSource]]: - path: Optional[str] = None url: Optional[str] = None if os.path.exists(location): # Is a local path. diff --git a/env/Lib/site-packages/pip/_internal/locations/__init__.py b/env/Lib/site-packages/pip/_internal/locations/__init__.py index 3acb51bc4545ad039f1bde699bac7a02921affc8..d54bc63eba364bda3f869a0f3b1863b872f9682a 100644 --- a/env/Lib/site-packages/pip/_internal/locations/__init__.py +++ b/env/Lib/site-packages/pip/_internal/locations/__init__.py @@ -1,16 +1,22 @@ +import functools import logging +import os import pathlib import sys import sysconfig -from typing import List, Optional +from typing import Any, Dict, Generator, Optional, Tuple from pip._internal.models.scheme import SCHEME_KEYS, Scheme +from pip._internal.utils.compat import WINDOWS +from pip._internal.utils.deprecation import deprecated +from pip._internal.utils.virtualenv import running_under_virtualenv -from . import _distutils, _sysconfig +from . import _sysconfig from .base import ( USER_CACHE_DIR, get_major_minor_version, get_src_prefix, + is_osx_framework, site_packages, user_site, ) @@ -21,7 +27,6 @@ __all__ = [ "get_bin_user", "get_major_minor_version", "get_platlib", - "get_prefixed_libs", "get_purelib", "get_scheme", "get_src_prefix", @@ -33,28 +38,177 @@ __all__ = [ logger = logging.getLogger(__name__) -def _default_base(*, user: bool) -> str: - if user: - base = sysconfig.get_config_var("userbase") - else: - base = sysconfig.get_config_var("base") - assert base is not None - return base +_PLATLIBDIR: str = getattr(sys, "platlibdir", "lib") +_USE_SYSCONFIG_DEFAULT = sys.version_info >= (3, 10) -def _warn_if_mismatch(old: pathlib.Path, new: pathlib.Path, *, key: str) -> bool: - if old == new: + +def _should_use_sysconfig() -> bool: + """This function determines the value of _USE_SYSCONFIG. + + By default, pip uses sysconfig on Python 3.10+. + But Python distributors can override this decision by setting: + sysconfig._PIP_USE_SYSCONFIG = True / False + Rationale in https://github.com/pypa/pip/issues/10647 + + This is a function for testability, but should be constant during any one + run. + """ + return bool(getattr(sysconfig, "_PIP_USE_SYSCONFIG", _USE_SYSCONFIG_DEFAULT)) + + +_USE_SYSCONFIG = _should_use_sysconfig() + +if not _USE_SYSCONFIG: + # Import distutils lazily to avoid deprecation warnings, + # but import it soon enough that it is in memory and available during + # a pip reinstall. + from . import _distutils + +# Be noisy about incompatibilities if this platforms "should" be using +# sysconfig, but is explicitly opting out and using distutils instead. +if _USE_SYSCONFIG_DEFAULT and not _USE_SYSCONFIG: + _MISMATCH_LEVEL = logging.WARNING +else: + _MISMATCH_LEVEL = logging.DEBUG + + +def _looks_like_bpo_44860() -> bool: + """The resolution to bpo-44860 will change this incorrect platlib. + + See <https://bugs.python.org/issue44860>. 
+ """ + from distutils.command.install import INSTALL_SCHEMES + + try: + unix_user_platlib = INSTALL_SCHEMES["unix_user"]["platlib"] + except KeyError: + return False + return unix_user_platlib == "$usersite" + + +def _looks_like_red_hat_patched_platlib_purelib(scheme: Dict[str, str]) -> bool: + platlib = scheme["platlib"] + if "/$platlibdir/" in platlib: + platlib = platlib.replace("/$platlibdir/", f"/{_PLATLIBDIR}/") + if "/lib64/" not in platlib: + return False + unpatched = platlib.replace("/lib64/", "/lib/") + return unpatched.replace("$platbase/", "$base/") == scheme["purelib"] + + +@functools.lru_cache(maxsize=None) +def _looks_like_red_hat_lib() -> bool: + """Red Hat patches platlib in unix_prefix and unix_home, but not purelib. + + This is the only way I can see to tell a Red Hat-patched Python. + """ + from distutils.command.install import INSTALL_SCHEMES + + return all( + k in INSTALL_SCHEMES + and _looks_like_red_hat_patched_platlib_purelib(INSTALL_SCHEMES[k]) + for k in ("unix_prefix", "unix_home") + ) + + +@functools.lru_cache(maxsize=None) +def _looks_like_debian_scheme() -> bool: + """Debian adds two additional schemes.""" + from distutils.command.install import INSTALL_SCHEMES + + return "deb_system" in INSTALL_SCHEMES and "unix_local" in INSTALL_SCHEMES + + +@functools.lru_cache(maxsize=None) +def _looks_like_red_hat_scheme() -> bool: + """Red Hat patches ``sys.prefix`` and ``sys.exec_prefix``. + + Red Hat's ``00251-change-user-install-location.patch`` changes the install + command's ``prefix`` and ``exec_prefix`` to append ``"/local"``. This is + (fortunately?) done quite unconditionally, so we create a default command + object without any configuration to detect this. + """ + from distutils.command.install import install + from distutils.dist import Distribution + + cmd: Any = install(Distribution()) + cmd.finalize_options() + return ( + cmd.exec_prefix == f"{os.path.normpath(sys.exec_prefix)}/local" + and cmd.prefix == f"{os.path.normpath(sys.prefix)}/local" + ) + + +@functools.lru_cache(maxsize=None) +def _looks_like_slackware_scheme() -> bool: + """Slackware patches sysconfig but fails to patch distutils and site. + + Slackware changes sysconfig's user scheme to use ``"lib64"`` for the lib + path, but does not do the same to the site module. + """ + if user_site is None: # User-site not available. + return False + try: + paths = sysconfig.get_paths(scheme="posix_user", expand=False) + except KeyError: # User-site not available. return False - issue_url = "https://github.com/pypa/pip/issues/9617" + return "/lib64/" in paths["purelib"] and "/lib64/" not in user_site + + +@functools.lru_cache(maxsize=None) +def _looks_like_msys2_mingw_scheme() -> bool: + """MSYS2 patches distutils and sysconfig to use a UNIX-like scheme. + + However, MSYS2 incorrectly patches sysconfig ``nt`` scheme. The fix is + likely going to be included in their 3.10 release, so we ignore the warning. + See msys2/MINGW-packages#9319. + + MSYS2 MINGW's patch uses lowercase ``"lib"`` instead of the usual uppercase, + and is missing the final ``"site-packages"``. + """ + paths = sysconfig.get_paths("nt", expand=False) + return all( + "Lib" not in p and "lib" in p and not p.endswith("site-packages") + for p in (paths[key] for key in ("platlib", "purelib")) + ) + + +def _fix_abiflags(parts: Tuple[str]) -> Generator[str, None, None]: + ldversion = sysconfig.get_config_var("LDVERSION") + abiflags = getattr(sys, "abiflags", None) + + # LDVERSION does not end with sys.abiflags. 
Just return the path unchanged. + if not ldversion or not abiflags or not ldversion.endswith(abiflags): + yield from parts + return + + # Strip sys.abiflags from LDVERSION-based path components. + for part in parts: + if part.endswith(ldversion): + part = part[: (0 - len(abiflags))] + yield part + + +@functools.lru_cache(maxsize=None) +def _warn_mismatched(old: pathlib.Path, new: pathlib.Path, *, key: str) -> None: + issue_url = "https://github.com/pypa/pip/issues/10151" message = ( "Value for %s does not match. Please report this to <%s>" "\ndistutils: %s" "\nsysconfig: %s" ) - logger.debug(message, key, issue_url, old, new) + logger.log(_MISMATCH_LEVEL, message, key, issue_url, old, new) + + +def _warn_if_mismatch(old: pathlib.Path, new: pathlib.Path, *, key: str) -> bool: + if old == new: + return False + _warn_mismatched(old, new, key=key) return True +@functools.lru_cache(maxsize=None) def _log_context( *, user: bool = False, @@ -62,22 +216,26 @@ def _log_context( root: Optional[str] = None, prefix: Optional[str] = None, ) -> None: - message = ( - "Additional context:" "\nuser = %r" "\nhome = %r" "\nroot = %r" "\nprefix = %r" - ) - logger.debug(message, user, home, root, prefix) + parts = [ + "Additional context:", + "user = %r", + "home = %r", + "root = %r", + "prefix = %r", + ] + + logger.log(_MISMATCH_LEVEL, "\n".join(parts), user, home, root, prefix) def get_scheme( - dist_name, # type: str - user=False, # type: bool - home=None, # type: Optional[str] - root=None, # type: Optional[str] - isolated=False, # type: bool - prefix=None, # type: Optional[str] -): - # type: (...) -> Scheme - old = _distutils.get_scheme( + dist_name: str, + user: bool = False, + home: Optional[str] = None, + root: Optional[str] = None, + isolated: bool = False, + prefix: Optional[str] = None, +) -> Scheme: + new = _sysconfig.get_scheme( dist_name, user=user, home=home, @@ -85,7 +243,10 @@ def get_scheme( isolated=isolated, prefix=prefix, ) - new = _sysconfig.get_scheme( + if _USE_SYSCONFIG: + return new + + old = _distutils.get_scheme( dist_name, user=user, home=home, @@ -94,13 +255,14 @@ def get_scheme( prefix=prefix, ) - base = prefix or home or _default_base(user=user) - warned = [] + warning_contexts = [] for k in SCHEME_KEYS: - # Extra join because distutils can return relative paths. - old_v = pathlib.Path(base, getattr(old, k)) + old_v = pathlib.Path(getattr(old, k)) new_v = pathlib.Path(getattr(new, k)) + if old_v == new_v: + continue + # distutils incorrectly put PyPy packages under ``site-packages/python`` # in the ``posix_home`` scheme, but PyPy devs said they expect the # directory name to be ``pypy`` instead. So we treat this as a bug fix @@ -110,75 +272,196 @@ def get_scheme( and home is not None and k in ("platlib", "purelib") and old_v.parent == new_v.parent - and old_v.name == "python" - and new_v.name == "pypy" + and old_v.name.startswith("python") + and new_v.name.startswith("pypy") ) if skip_pypy_special_case: continue - warned.append(_warn_if_mismatch(old_v, new_v, key=f"scheme.{k}")) + # sysconfig's ``osx_framework_user`` does not include ``pythonX.Y`` in + # the ``include`` value, but distutils's ``headers`` does. We'll let + # CPython decide whether this is a bug or feature. See bpo-43948. 
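+ # (Editorial illustration with hypothetical paths: on a 3.9 framework + # build, distutils "headers" may give .../include/python3.9/<name> while + # sysconfig "include" gives .../include/<name>; the parent/parent + # comparison below matches exactly that one-directory difference.)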
+ skip_osx_framework_user_special_case = ( + user + and is_osx_framework() + and k == "headers" + and old_v.parent.parent == new_v.parent + and old_v.parent.name.startswith("python") + ) + if skip_osx_framework_user_special_case: + continue + + # On Red Hat and derived Linux distributions, distutils is patched to + # use "lib64" instead of "lib" for platlib. + if k == "platlib" and _looks_like_red_hat_lib(): + continue + + # On Python 3.9+, sysconfig's posix_user scheme sets platlib against + # sys.platlibdir, but distutils's unix_user incorrectly continues + # using the same $usersite for both platlib and purelib. This creates a + # mismatch when sys.platlibdir is not "lib". + skip_bpo_44860 = ( + user + and k == "platlib" + and not WINDOWS + and sys.version_info >= (3, 9) + and _PLATLIBDIR != "lib" + and _looks_like_bpo_44860() + ) + if skip_bpo_44860: + continue + + # Slackware incorrectly patches posix_user to use lib64 instead of lib, + # but not usersite to match the location. + skip_slackware_user_scheme = ( + user + and k in ("platlib", "purelib") + and not WINDOWS + and _looks_like_slackware_scheme() + ) + if skip_slackware_user_scheme: + continue + + # Both Debian and Red Hat patch Python to place the system site under + # /usr/local instead of /usr. Debian also places lib in dist-packages + # instead of site-packages, but the /usr/local check should cover it. + skip_linux_system_special_case = ( + not (user or home or prefix or running_under_virtualenv()) + and old_v.parts[1:3] == ("usr", "local") + and len(new_v.parts) > 1 + and new_v.parts[1] == "usr" + and (len(new_v.parts) < 3 or new_v.parts[2] != "local") + and (_looks_like_red_hat_scheme() or _looks_like_debian_scheme()) + ) + if skip_linux_system_special_case: + continue + + # On Python 3.7 and earlier, sysconfig does not include sys.abiflags in + # the "pythonX.Y" part of the path, but distutils does. + skip_sysconfig_abiflag_bug = ( + sys.version_info < (3, 8) + and not WINDOWS + and k in ("headers", "platlib", "purelib") + and tuple(_fix_abiflags(old_v.parts)) == new_v.parts + ) + if skip_sysconfig_abiflag_bug: + continue + + # MSYS2 MINGW's sysconfig patch does not include the "site-packages" + # part of the path. This is incorrect and will be fixed in MSYS. + skip_msys2_mingw_bug = ( + WINDOWS and k in ("platlib", "purelib") and _looks_like_msys2_mingw_scheme() + ) + if skip_msys2_mingw_bug: + continue + + # CPython's POSIX install script invokes pip (via ensurepip) against the + # interpreter located in the source tree, not the install site. This + # triggers special logic in sysconfig that's not present in distutils. + # https://github.com/python/cpython/blob/8c21941ddaf/Lib/sysconfig.py#L178-L194 + skip_cpython_build = ( + sysconfig.is_python_build(check_home=True) + and not WINDOWS + and k in ("headers", "include", "platinclude") + ) + if skip_cpython_build: + continue + + warning_contexts.append((old_v, new_v, f"scheme.{k}")) + + if not warning_contexts: + return old + + # Check if this path mismatch is caused by distutils config files. Those + # files will no longer work once we switch to sysconfig, so this raises a + # deprecation message for them. + default_old = _distutils.distutils_scheme( + dist_name, + user, + home, + root, + isolated, + prefix, + ignore_config_files=True, + ) + if any(default_old[k] != getattr(old, k) for k in SCHEME_KEYS): + deprecated( + reason=( + "Configuring installation scheme with distutils config files " + "is deprecated and will no longer work in the near future.
If you " + "are using a Homebrew or Linuxbrew Python, please see discussion " + "at https://github.com/Homebrew/homebrew-core/issues/76621" + ), + replacement=None, + gone_in=None, + ) + return old - if any(warned): - _log_context(user=user, home=home, root=root, prefix=prefix) + # Post warnings about this mismatch so user can report them back. + for old_v, new_v, key in warning_contexts: + _warn_mismatched(old_v, new_v, key=key) + _log_context(user=user, home=home, root=root, prefix=prefix) return old -def get_bin_prefix(): - # type: () -> str - old = _distutils.get_bin_prefix() +def get_bin_prefix() -> str: new = _sysconfig.get_bin_prefix() + if _USE_SYSCONFIG: + return new + + old = _distutils.get_bin_prefix() if _warn_if_mismatch(pathlib.Path(old), pathlib.Path(new), key="bin_prefix"): _log_context() return old -def get_bin_user(): - # type: () -> str +def get_bin_user() -> str: return _sysconfig.get_scheme("", user=True).scripts -def get_purelib(): - # type: () -> str +def _looks_like_deb_system_dist_packages(value: str) -> bool: + """Check if the value is Debian's APT-controlled dist-packages. + + Debian's ``distutils.sysconfig.get_python_lib()`` implementation returns the + default package path controlled by APT, but does not patch ``sysconfig`` to + do the same. This is similar to the bug worked around in ``get_scheme()``, + but here the default is ``deb_system`` instead of ``unix_local``. Ultimately + we can't do anything about this Debian bug, and this detection allows us to + skip the warning when needed. + """ + if not _looks_like_debian_scheme(): + return False + if value == "/usr/lib/python3/dist-packages": + return True + return False + + +def get_purelib() -> str: """Return the default pure-Python lib location.""" - old = _distutils.get_purelib() new = _sysconfig.get_purelib() + if _USE_SYSCONFIG: + return new + + old = _distutils.get_purelib() + if _looks_like_deb_system_dist_packages(old): + return old if _warn_if_mismatch(pathlib.Path(old), pathlib.Path(new), key="purelib"): _log_context() return old -def get_platlib(): - # type: () -> str +def get_platlib() -> str: """Return the default platform-shared lib location.""" - old = _distutils.get_platlib() new = _sysconfig.get_platlib() + if _USE_SYSCONFIG: + return new + + from . 
import _distutils + + old = _distutils.get_platlib() + if _looks_like_deb_system_dist_packages(old): + return old if _warn_if_mismatch(pathlib.Path(old), pathlib.Path(new), key="platlib"): _log_context() return old - - -def get_prefixed_libs(prefix): - # type: (str) -> List[str] - """Return the lib locations under ``prefix``.""" - old_pure, old_plat = _distutils.get_prefixed_libs(prefix) - new_pure, new_plat = _sysconfig.get_prefixed_libs(prefix) - - warned = [ - _warn_if_mismatch( - pathlib.Path(old_pure), - pathlib.Path(new_pure), - key="prefixed-purelib", - ), - _warn_if_mismatch( - pathlib.Path(old_plat), - pathlib.Path(new_plat), - key="prefixed-platlib", - ), - ] - if any(warned): - _log_context(prefix=prefix) - - if old_pure == old_plat: - return [old_pure] - return [old_pure, old_plat] diff --git a/env/Lib/site-packages/pip/_internal/locations/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/locations/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 3d19e8715c4d0408347699bbbce4e079c03c082b..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/locations/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/locations/__pycache__/_distutils.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/locations/__pycache__/_distutils.cpython-39.pyc deleted file mode 100644 index e13efd8ecdfb8353e2a5b63eff73d5ebe58194ce..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/locations/__pycache__/_distutils.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/locations/__pycache__/_sysconfig.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/locations/__pycache__/_sysconfig.cpython-39.pyc deleted file mode 100644 index 92a05c6453ca8f227db8ed333822ae0bdf066118..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/locations/__pycache__/_sysconfig.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/locations/__pycache__/base.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/locations/__pycache__/base.cpython-39.pyc deleted file mode 100644 index eed182270bed2d109b288c03b2fe65a134ee2e86..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/locations/__pycache__/base.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/locations/_distutils.py b/env/Lib/site-packages/pip/_internal/locations/_distutils.py index 2d7ab73213c096f9d9cd299baf920d6bf73bd7e6..92bd93179c5cd3cb377c8b9f1e9d22d13fd7d003 100644 --- a/env/Lib/site-packages/pip/_internal/locations/_distutils.py +++ b/env/Lib/site-packages/pip/_internal/locations/_distutils.py @@ -3,13 +3,25 @@ # The following comment should be removed at some point in the future. # mypy: strict-optional=False +# If pip's going to use distutils, it should not be using the copy that setuptools +# might have injected into the environment. This is done by removing the injected +# shim, if it's injected. +# +# See https://github.com/pypa/pip/issues/8761 for the original discussion and +# rationale for why this is done within pip. 
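+# (Editor's note: ``_distutils_hack`` is the import hook setuptools installs +# so that ``import distutils`` resolves to its bundled copy; ``remove_shim()`` +# undoes that redirection. The guard below tolerates both the module and the +# attribute being absent on older setuptools.)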
+try: + __import__("_distutils_hack").remove_shim() +except (ImportError, AttributeError): + pass + +import logging import os import sys from distutils.cmd import Command as DistutilsCommand from distutils.command.install import SCHEME_KEYS from distutils.command.install import install as distutils_install_command from distutils.sysconfig import get_python_lib -from typing import Dict, List, Optional, Tuple, Union, cast +from typing import Dict, List, Optional, Union, cast from pip._internal.models.scheme import Scheme from pip._internal.utils.compat import WINDOWS @@ -17,23 +29,40 @@ from pip._internal.utils.virtualenv import running_under_virtualenv from .base import get_major_minor_version +logger = logging.getLogger(__name__) -def _distutils_scheme( - dist_name, user=False, home=None, root=None, isolated=False, prefix=None -): - # type:(str, bool, str, str, bool, str) -> Dict[str, str] + +def distutils_scheme( + dist_name: str, + user: bool = False, + home: Optional[str] = None, + root: Optional[str] = None, + isolated: bool = False, + prefix: Optional[str] = None, + *, + ignore_config_files: bool = False, +) -> Dict[str, str]: """ Return a distutils install scheme """ from distutils.dist import Distribution - dist_args = {"name": dist_name} # type: Dict[str, Union[str, List[str]]] + dist_args: Dict[str, Union[str, List[str]]] = {"name": dist_name} if isolated: dist_args["script_args"] = ["--no-user-cfg"] d = Distribution(dist_args) - d.parse_config_files() - obj = None # type: Optional[DistutilsCommand] + if not ignore_config_files: + try: + d.parse_config_files() + except UnicodeDecodeError: + # Typeshed does not include find_config_files() for some reason. + paths = d.find_config_files() # type: ignore + logger.warning( + "Ignore distutils configs in %s due to encoding errors.", + ", ".join(os.path.basename(p) for p in paths), + ) + obj: Optional[DistutilsCommand] = None obj = d.get_command_obj("install", create=True) assert obj is not None i = cast(distutils_install_command, obj) @@ -63,8 +92,14 @@ def _distutils_scheme( scheme.update(dict(purelib=i.install_lib, platlib=i.install_lib)) if running_under_virtualenv(): + if home: + prefix = home + elif user: + prefix = i.install_userbase + else: + prefix = i.prefix scheme["headers"] = os.path.join( - i.prefix, + prefix, "include", "site", f"python{get_major_minor_version()}", @@ -73,23 +108,19 @@ def _distutils_scheme( if root is not None: path_no_drive = os.path.splitdrive(os.path.abspath(scheme["headers"]))[1] - scheme["headers"] = os.path.join( - root, - path_no_drive[1:], - ) + scheme["headers"] = os.path.join(root, path_no_drive[1:]) return scheme def get_scheme( - dist_name, # type: str - user=False, # type: bool - home=None, # type: Optional[str] - root=None, # type: Optional[str] - isolated=False, # type: bool - prefix=None, # type: Optional[str] -): - # type: (...) -> Scheme + dist_name: str, + user: bool = False, + home: Optional[str] = None, + root: Optional[str] = None, + isolated: bool = False, + prefix: Optional[str] = None, +) -> Scheme: """ Get the "scheme" corresponding to the input parameters. 
The distutils documentation provides the context for the available schemes: @@ -107,7 +138,7 @@ def get_scheme( :param prefix: indicates to use the "prefix" scheme and provides the base directory for the same """ - scheme = _distutils_scheme(dist_name, user, home, root, isolated, prefix) + scheme = distutils_scheme(dist_name, user, home, root, isolated, prefix) return Scheme( platlib=scheme["platlib"], purelib=scheme["purelib"], @@ -117,34 +148,26 @@ def get_scheme( ) -def get_bin_prefix(): - # type: () -> str +def get_bin_prefix() -> str: + # XXX: In old virtualenv versions, sys.prefix can contain '..' components, + # so we need to call normpath to eliminate them. + prefix = os.path.normpath(sys.prefix) if WINDOWS: - bin_py = os.path.join(sys.prefix, "Scripts") + bin_py = os.path.join(prefix, "Scripts") # buildout uses 'bin' on Windows too? if not os.path.exists(bin_py): - bin_py = os.path.join(sys.prefix, "bin") + bin_py = os.path.join(prefix, "bin") return bin_py # Forcing to use /usr/local/bin for standard macOS framework installs # Also log to ~/Library/Logs/ for use with the Console.app log viewer - if sys.platform[:6] == "darwin" and sys.prefix[:16] == "/System/Library/": + if sys.platform[:6] == "darwin" and prefix[:16] == "/System/Library/": return "/usr/local/bin" - return os.path.join(sys.prefix, "bin") + return os.path.join(prefix, "bin") -def get_purelib(): - # type: () -> str +def get_purelib() -> str: return get_python_lib(plat_specific=False) -def get_platlib(): - # type: () -> str +def get_platlib() -> str: return get_python_lib(plat_specific=True) - - -def get_prefixed_libs(prefix): - # type: (str) -> Tuple[str, str] - return ( - get_python_lib(plat_specific=False, prefix=prefix), - get_python_lib(plat_specific=True, prefix=prefix), - ) diff --git a/env/Lib/site-packages/pip/_internal/locations/_sysconfig.py b/env/Lib/site-packages/pip/_internal/locations/_sysconfig.py index 03366ce6e7164e44ba66315d4340e33d49344abd..97aef1f1ac237e6ef97b1a1d026818d7b8ab9be9 100644 --- a/env/Lib/site-packages/pip/_internal/locations/_sysconfig.py +++ b/env/Lib/site-packages/pip/_internal/locations/_sysconfig.py @@ -1,4 +1,3 @@ -import distutils.util # FIXME: For change_root. import logging import os import sys @@ -9,14 +8,14 @@ from pip._internal.exceptions import InvalidSchemeCombination, UserInstallationI from pip._internal.models.scheme import SCHEME_KEYS, Scheme from pip._internal.utils.virtualenv import running_under_virtualenv -from .base import get_major_minor_version +from .base import change_root, get_major_minor_version, is_osx_framework logger = logging.getLogger(__name__) # Notes on _infer_* functions. -# Unfortunately ``_get_default_scheme()`` is private, so there's no way to -# ask things like "what is the '_prefix' scheme on this platform". These +# Unfortunately ``get_default_scheme()`` didn't exist before 3.10, so there's no +# way to ask things like "what is the '_prefix' scheme on this platform". These # functions try to answer that with some heuristics while accounting for ad-hoc # platforms not covered by CPython's default sysconfig implementation. If the # ad-hoc implementation does not fully implement sysconfig, we'll fall back to @@ -24,13 +23,42 @@ logger = logging.getLogger(__name__) _AVAILABLE_SCHEMES = set(sysconfig.get_scheme_names()) +_PREFERRED_SCHEME_API = getattr(sysconfig, "get_preferred_scheme", None) -def _infer_prefix(): - # type: () -> str + +def _should_use_osx_framework_prefix() -> bool: + """Check for Apple's ``osx_framework_library`` scheme. 
+ + Python distributed by Apple's Command Line Tools has this special scheme + that's used when: + + * This is a framework build. + * We are installing into the system prefix. + + This does not account for ``pip install --prefix`` (also means we're not + installing to the system prefix), which should use ``posix_prefix``, but + logic here means ``_infer_prefix()`` outputs ``osx_framework_library``. But + since ``prefix`` is not available for ``sysconfig.get_default_scheme()``, + which is the stdlib replacement for ``_infer_prefix()``, presumably Apple + wouldn't be able to magically switch between ``osx_framework_library`` and + ``posix_prefix``. ``_infer_prefix()`` returning ``osx_framework_library`` + means its behavior is consistent whether we use the stdlib implementation + or our own, and we deal with this special case in ``get_scheme()`` instead. + """ + return ( + "osx_framework_library" in _AVAILABLE_SCHEMES + and not running_under_virtualenv() + and is_osx_framework() + ) + + +def _infer_prefix() -> str: """Try to find a prefix scheme for the current platform. This tries: + * A special ``osx_framework_library`` for Python distributed by Apple's + Command Line Tools, when not running in a virtual environment. * Implementation + OS, used by PyPy on Windows (``pypy_nt``). * Implementation without OS, used by PyPy on POSIX (``pypy``). * OS + "prefix", used by CPython on POSIX (``posix_prefix``). @@ -38,6 +66,10 @@ def _infer_prefix(): If none of the above works, fall back to ``posix_prefix``. """ + if _PREFERRED_SCHEME_API: + return _PREFERRED_SCHEME_API("prefix") + if _should_use_osx_framework_prefix(): + return "osx_framework_library" implementation_suffixed = f"{sys.implementation.name}_{os.name}" if implementation_suffixed in _AVAILABLE_SCHEMES: return implementation_suffixed @@ -51,10 +83,14 @@ def _infer_prefix(): return "posix_prefix" -def _infer_user(): - # type: () -> str +def _infer_user() -> str: """Try to find a user scheme for the current platform.""" - suffixed = f"{os.name}_user" + if _PREFERRED_SCHEME_API: + return _PREFERRED_SCHEME_API("user") + if is_osx_framework() and not running_under_virtualenv(): + suffixed = "osx_framework_user" + else: + suffixed = f"{os.name}_user" if suffixed in _AVAILABLE_SCHEMES: return suffixed if "posix_user" not in _AVAILABLE_SCHEMES: # User scheme unavailable. @@ -62,9 +98,10 @@ def _infer_user(): return "posix_user" -def _infer_home(): - # type: () -> str +def _infer_home() -> str: """Try to find a home for the current platform.""" + if _PREFERRED_SCHEME_API: + return _PREFERRED_SCHEME_API("home") suffixed = f"{os.name}_home" if suffixed in _AVAILABLE_SCHEMES: return suffixed @@ -85,14 +122,13 @@ if sysconfig.get_config_var("userbase") is not None: def get_scheme( - dist_name, # type: str - user=False, # type: bool - home=None, # type: typing.Optional[str] - root=None, # type: typing.Optional[str] - isolated=False, # type: bool - prefix=None, # type: typing.Optional[str] -): - # type: (...) -> Scheme + dist_name: str, + user: bool = False, + home: typing.Optional[str] = None, + root: typing.Optional[str] = None, + isolated: bool = False, + prefix: typing.Optional[str] = None, +) -> Scheme: """ Get the "scheme" corresponding to the input parameters. @@ -118,6 +154,12 @@ def get_scheme( else: scheme_name = _infer_prefix() + # Special case: When installing into a custom prefix, use posix_prefix + # instead of osx_framework_library. See _should_use_osx_framework_prefix() + # docstring for details. 
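+ # (Editorial example with hypothetical paths: ``pip install --prefix=/opt/app X`` + # on a framework build should land in /opt/app/lib/python3.X/site-packages, + # i.e. the posix_prefix layout, rather than inside the framework tree.)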
+ if prefix is not None and scheme_name == "osx_framework_library": + scheme_name = "posix_prefix" + if home is not None: variables = {k: home for k in _HOME_KEYS} elif prefix is not None: @@ -151,30 +193,21 @@ def get_scheme( ) if root is not None: for key in SCHEME_KEYS: - value = distutils.util.change_root(root, getattr(scheme, key)) + value = change_root(root, getattr(scheme, key)) setattr(scheme, key, value) return scheme -def get_bin_prefix(): - # type: () -> str +def get_bin_prefix() -> str: # Forcing to use /usr/local/bin for standard macOS framework installs. if sys.platform[:6] == "darwin" and sys.prefix[:16] == "/System/Library/": return "/usr/local/bin" return sysconfig.get_paths()["scripts"] -def get_purelib(): - # type: () -> str +def get_purelib() -> str: return sysconfig.get_paths()["purelib"] -def get_platlib(): - # type: () -> str +def get_platlib() -> str: return sysconfig.get_paths()["platlib"] - - -def get_prefixed_libs(prefix): - # type: (str) -> typing.Tuple[str, str] - paths = sysconfig.get_paths(vars={"base": prefix, "platbase": prefix}) - return (paths["purelib"], paths["platlib"]) diff --git a/env/Lib/site-packages/pip/_internal/locations/base.py b/env/Lib/site-packages/pip/_internal/locations/base.py index 98557abbe63f419b84fb15298e6ccd9d313550be..3f9f896e632e929a63e9724ab80ecdfc9761b795 100644 --- a/env/Lib/site-packages/pip/_internal/locations/base.py +++ b/env/Lib/site-packages/pip/_internal/locations/base.py @@ -1,9 +1,11 @@ +import functools import os import site import sys import sysconfig import typing +from pip._internal.exceptions import InstallationError from pip._internal.utils import appdirs from pip._internal.utils.virtualenv import running_under_virtualenv @@ -11,11 +13,10 @@ from pip._internal.utils.virtualenv import running_under_virtualenv USER_CACHE_DIR = appdirs.user_cache_dir("pip") # FIXME doesn't account for venv linked to global site-packages -site_packages = sysconfig.get_path("purelib") # type: typing.Optional[str] +site_packages: str = sysconfig.get_path("purelib") -def get_major_minor_version(): - # type: () -> str +def get_major_minor_version() -> str: """ Return the major-minor version of the current Python as a string, e.g. "3.7" or "3.10". @@ -23,8 +24,35 @@ def get_major_minor_version(): return "{}.{}".format(*sys.version_info) -def get_src_prefix(): - # type: () -> str +def change_root(new_root: str, pathname: str) -> str: + """Return 'pathname' with 'new_root' prepended. + + If 'pathname' is relative, this is equivalent to os.path.join(new_root, pathname). + Otherwise, it requires making 'pathname' relative and then joining the + two, which is tricky on DOS/Windows and Mac OS. + + This is borrowed from Python's standard library's distutils module. + """ + if os.name == "posix": + if not os.path.isabs(pathname): + return os.path.join(new_root, pathname) + else: + return os.path.join(new_root, pathname[1:]) + + elif os.name == "nt": + (drive, path) = os.path.splitdrive(pathname) + if path[0] == "\\": + path = path[1:] + return os.path.join(new_root, path) + + else: + raise InstallationError( + f"Unknown platform: {os.name}\n" + "Can not change root path prefix on unknown platform." + ) + + +def get_src_prefix() -> str: if running_under_virtualenv(): src_prefix = os.path.join(sys.prefix, "src") else: @@ -43,6 +71,11 @@ def get_src_prefix(): try: # Use getusersitepackages if this is present, as it ensures that the # value is initialised properly. 
- user_site = site.getusersitepackages() # type: typing.Optional[str] + user_site: typing.Optional[str] = site.getusersitepackages() except AttributeError: user_site = site.USER_SITE + + +@functools.lru_cache(maxsize=None) +def is_osx_framework() -> bool: + return bool(sysconfig.get_config_var("PYTHONFRAMEWORK")) diff --git a/env/Lib/site-packages/pip/_internal/main.py b/env/Lib/site-packages/pip/_internal/main.py index 51eee1588d900f2a5ee4bf4027bb57a337f42854..33c6d24cd85b55a9fb1b1e6ab784f471e2b135f0 100644 --- a/env/Lib/site-packages/pip/_internal/main.py +++ b/env/Lib/site-packages/pip/_internal/main.py @@ -1,8 +1,7 @@ from typing import List, Optional -def main(args=None): - # type: (Optional[List[str]]) -> int +def main(args: Optional[List[str]] = None) -> int: """This is preserved for old console scripts that may still be referencing it. diff --git a/env/Lib/site-packages/pip/_internal/metadata/__init__.py b/env/Lib/site-packages/pip/_internal/metadata/__init__.py index 63335a1919361c1b5ebd1d30d1dbdd6848e55e13..9f73ca7105ff0bf11d74dd16ffb0653059466f70 100644 --- a/env/Lib/site-packages/pip/_internal/metadata/__init__.py +++ b/env/Lib/site-packages/pip/_internal/metadata/__init__.py @@ -1,36 +1,100 @@ -from typing import List, Optional +import contextlib +import functools +import os +import sys +from typing import TYPE_CHECKING, List, Optional, Type, cast -from .base import BaseDistribution, BaseEnvironment +from pip._internal.utils.misc import strtobool +from .base import BaseDistribution, BaseEnvironment, FilesystemWheel, MemoryWheel, Wheel -def get_default_environment(): - # type: () -> BaseEnvironment +if TYPE_CHECKING: + from typing import Protocol +else: + Protocol = object + +__all__ = [ + "BaseDistribution", + "BaseEnvironment", + "FilesystemWheel", + "MemoryWheel", + "Wheel", + "get_default_environment", + "get_environment", + "get_wheel_distribution", + "select_backend", +] + + +def _should_use_importlib_metadata() -> bool: + """Whether to use the ``importlib.metadata`` or ``pkg_resources`` backend. + + By default, pip uses ``importlib.metadata`` on Python 3.11+, and + ``pkg_resources`` otherwise. This can be overridden in a couple of ways: + + * If environment variable ``_PIP_USE_IMPORTLIB_METADATA`` is set, it + dictates whether ``importlib.metadata`` is used, regardless of Python + version. + * On Python 3.11+, Python distributors can patch ``importlib.metadata`` + to add a global constant ``_PIP_USE_IMPORTLIB_METADATA = False``. This + makes pip use ``pkg_resources`` (unless the user set the aforementioned + environment variable to *True*). + """ + with contextlib.suppress(KeyError, ValueError): + return bool(strtobool(os.environ["_PIP_USE_IMPORTLIB_METADATA"])) + if sys.version_info < (3, 11): + return False + import importlib.metadata + + return bool(getattr(importlib.metadata, "_PIP_USE_IMPORTLIB_METADATA", True)) + + +class Backend(Protocol): + Distribution: Type[BaseDistribution] + Environment: Type[BaseEnvironment] + + +@functools.lru_cache(maxsize=None) +def select_backend() -> Backend: + if _should_use_importlib_metadata(): + from . import importlib + + return cast(Backend, importlib) + from . import pkg_resources + + return cast(Backend, pkg_resources) + + +def get_default_environment() -> BaseEnvironment: """Get the default representation for the current environment. This returns an Environment instance from the chosen backend. The default Environment instance should be built from ``sys.path`` and may use caching to share instance state across calls.
""" - from .pkg_resources import Environment + return select_backend().Environment.default() - return Environment.default() - -def get_environment(paths): - # type: (Optional[List[str]]) -> BaseEnvironment +def get_environment(paths: Optional[List[str]]) -> BaseEnvironment: """Get a representation of the environment specified by ``paths``. This returns an Environment instance from the chosen backend based on the given import paths. The backend must build a fresh instance representing the state of installed distributions when this function is called. """ - from .pkg_resources import Environment + return select_backend().Environment.from_paths(paths) + + +def get_directory_distribution(directory: str) -> BaseDistribution: + """Get the distribution metadata representation in the specified directory. - return Environment.from_paths(paths) + This returns a Distribution instance from the chosen backend based on + the given on-disk ``.dist-info`` directory. + """ + return select_backend().Distribution.from_directory(directory) -def get_wheel_distribution(wheel_path, canonical_name): - # type: (str, str) -> BaseDistribution +def get_wheel_distribution(wheel: Wheel, canonical_name: str) -> BaseDistribution: """Get the representation of the specified wheel's distribution metadata. This returns a Distribution instance from the chosen backend based on @@ -38,6 +102,26 @@ def get_wheel_distribution(wheel_path, canonical_name): :param canonical_name: Normalized project name of the given wheel. """ - from .pkg_resources import Distribution + return select_backend().Distribution.from_wheel(wheel, canonical_name) + - return Distribution.from_wheel(wheel_path, canonical_name) +def get_metadata_distribution( + metadata_contents: bytes, + filename: str, + canonical_name: str, +) -> BaseDistribution: + """Get the dist representation of the specified METADATA file contents. + + This returns a Distribution instance from the chosen backend sourced from the data + in `metadata_contents`. + + :param metadata_contents: Contents of a METADATA file within a dist, or one served + via PEP 658. + :param filename: Filename for the dist this metadata represents. + :param canonical_name: Normalized project name of the given dist. 
+ """ + return select_backend().Distribution.from_metadata_file_contents( + metadata_contents, + filename, + canonical_name, + ) diff --git a/env/Lib/site-packages/pip/_internal/metadata/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/metadata/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index bea5930d53a0c4717b2f4cfcd5dc5ca87ed994c8..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/metadata/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/metadata/__pycache__/base.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/metadata/__pycache__/base.cpython-39.pyc deleted file mode 100644 index 15d4d9cd43929bb8621359d7e234a9b49100ce51..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/metadata/__pycache__/base.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/metadata/__pycache__/pkg_resources.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/metadata/__pycache__/pkg_resources.cpython-39.pyc deleted file mode 100644 index cd3d97162b0966eb1f7c5aad8187877a7c799777..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/metadata/__pycache__/pkg_resources.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/metadata/base.py b/env/Lib/site-packages/pip/_internal/metadata/base.py index 37f9a8232f6c47a9d4d094b36dcf1e66dbd7fb2b..cafb79fb3dcf43744393e2964056fe32c350bbc1 100644 --- a/env/Lib/site-packages/pip/_internal/metadata/base.py +++ b/env/Lib/site-packages/pip/_internal/metadata/base.py @@ -1,85 +1,604 @@ +import csv +import email.message +import functools +import json import logging +import pathlib import re -from typing import Container, Iterator, List, Optional, Union +import zipfile +from typing import ( + IO, + TYPE_CHECKING, + Any, + Collection, + Container, + Dict, + Iterable, + Iterator, + List, + NamedTuple, + Optional, + Tuple, + Union, +) +from pip._vendor.packaging.requirements import Requirement +from pip._vendor.packaging.specifiers import InvalidSpecifier, SpecifierSet +from pip._vendor.packaging.utils import NormalizedName from pip._vendor.packaging.version import LegacyVersion, Version -from pip._internal.utils.misc import stdlib_pkgs # TODO: Move definition here. +from pip._internal.exceptions import NoneMetadataError +from pip._internal.locations import site_packages, user_site +from pip._internal.models.direct_url import ( + DIRECT_URL_METADATA_NAME, + DirectUrl, + DirectUrlValidationError, +) +from pip._internal.utils.compat import stdlib_pkgs # TODO: Move definition here. 
+from pip._internal.utils.egg_link import egg_link_path_from_sys_path +from pip._internal.utils.misc import is_local, normalize_path +from pip._internal.utils.packaging import safe_extra +from pip._internal.utils.urls import url_to_path + +from ._json import msg_to_json + +if TYPE_CHECKING: + from typing import Protocol +else: + Protocol = object DistributionVersion = Union[LegacyVersion, Version] +InfoPath = Union[str, pathlib.PurePath] + logger = logging.getLogger(__name__) -class BaseDistribution: +class BaseEntryPoint(Protocol): + @property + def name(self) -> str: + raise NotImplementedError() + + @property + def value(self) -> str: + raise NotImplementedError() + + @property + def group(self) -> str: + raise NotImplementedError() + + +def _convert_installed_files_path( + entry: Tuple[str, ...], + info: Tuple[str, ...], +) -> str: + """Convert a legacy installed-files.txt path into modern RECORD path. + + The legacy format stores paths relative to the info directory, while the + modern format stores paths relative to the package root, e.g. the + site-packages directory. + + :param entry: Path parts of the installed-files.txt entry. + :param info: Path parts of the egg-info directory relative to package root. + :returns: The converted entry. + + For best compatibility with symlinks, this does not use ``abspath()`` or + ``Path.resolve()``, but tries to work with path parts: + + 1. While ``entry`` starts with ``..``, remove the equal amounts of parts + from ``info``; if ``info`` is empty, start appending ``..`` instead. + 2. Join the two directly. + """ + while entry and entry[0] == "..": + if not info or info[-1] == "..": + info += ("..",) + else: + info = info[:-1] + entry = entry[1:] + return str(pathlib.Path(*info, *entry)) + + +class RequiresEntry(NamedTuple): + requirement: str + extra: str + marker: str + + +class BaseDistribution(Protocol): + @classmethod + def from_directory(cls, directory: str) -> "BaseDistribution": + """Load the distribution from a metadata directory. + + :param directory: Path to a metadata directory, e.g. ``.dist-info``. + """ + raise NotImplementedError() + + @classmethod + def from_metadata_file_contents( + cls, + metadata_contents: bytes, + filename: str, + project_name: str, + ) -> "BaseDistribution": + """Load the distribution from the contents of a METADATA file. + + This is used to implement PEP 658 by generating a "shallow" dist object that can + be used for resolution without downloading or building the actual dist yet. + + :param metadata_contents: The contents of a METADATA file. + :param filename: File name for the dist with this metadata. + :param project_name: Name of the project this dist represents. + """ + raise NotImplementedError() + + @classmethod + def from_wheel(cls, wheel: "Wheel", name: str) -> "BaseDistribution": + """Load the distribution from a given wheel. + + :param wheel: A concrete wheel definition. + :param name: File name of the wheel. + + :raises InvalidWheel: Whenever loading of the wheel causes a + :py:exc:`zipfile.BadZipFile` exception to be thrown. + :raises UnsupportedWheel: If the wheel is a valid zip, but malformed + internally. + """ + raise NotImplementedError() + + def __repr__(self) -> str: + return f"{self.raw_name} {self.version} ({self.location})" + + def __str__(self) -> str: + return f"{self.raw_name} {self.version}" + @property - def location(self): - # type: () -> Optional[str] + def location(self) -> Optional[str]: """Where the distribution is loaded from. 
A string value is not necessarily a filesystem path, since distributions can be loaded from other sources, e.g. arbitrary zip archives. ``None`` means the distribution is created in-memory. + + Do not canonicalize this value with e.g. ``pathlib.Path.resolve()``. If + this is a symbolic link, we want to preserve the relative path between + it and files in the distribution. """ raise NotImplementedError() @property - def metadata_version(self): - # type: () -> Optional[str] - """Value of "Metadata-Version:" in the distribution, if available.""" - raise NotImplementedError() + def editable_project_location(self) -> Optional[str]: + """The project location for editable distributions. + + This is the directory where pyproject.toml or setup.py is located. + None if the distribution is not installed in editable mode. + """ + # TODO: this property is relatively costly to compute, memoize it ? + direct_url = self.direct_url + if direct_url: + if direct_url.is_local_editable(): + return url_to_path(direct_url.url) + else: + # Search for an .egg-link file by walking sys.path, as it was + # done before by dist_is_editable(). + egg_link_path = egg_link_path_from_sys_path(self.raw_name) + if egg_link_path: + # TODO: get project location from second line of egg_link file + # (https://github.com/pypa/pip/issues/10243) + return self.location + return None @property - def canonical_name(self): - # type: () -> str + def installed_location(self) -> Optional[str]: + """The distribution's "installed" location. + + This should generally be a ``site-packages`` directory. This is + usually ``dist.location``, except for legacy develop-installed packages, + where ``dist.location`` is the source code location, and this is where + the ``.egg-link`` file is. + + The returned location is normalized (in particular, with symlinks removed). + """ raise NotImplementedError() @property - def version(self): - # type: () -> DistributionVersion + def info_location(self) -> Optional[str]: + """Location of the .[egg|dist]-info directory or file. + + Similarly to ``location``, a string value is not necessarily a + filesystem path. ``None`` means the distribution is created in-memory. + + For a modern .dist-info installation on disk, this should be something + like ``{location}/{raw_name}-{version}.dist-info``. + + Do not canonicalize this value with e.g. ``pathlib.Path.resolve()``. If + this is a symbolic link, we want to preserve the relative path between + it and other files in the distribution. + """ raise NotImplementedError() @property - def installer(self): - # type: () -> str + def installed_by_distutils(self) -> bool: + """Whether this distribution is installed with legacy distutils format. + + A distribution installed with "raw" distutils not patched by setuptools + uses one single file at ``info_location`` to store metadata. We need to + treat this specially on uninstallation. + """ + info_location = self.info_location + if not info_location: + return False + return pathlib.Path(info_location).is_file() + + @property + def installed_as_egg(self) -> bool: + """Whether this distribution is installed as an egg. + + This usually indicates the distribution was installed by (older versions + of) easy_install. + """ + location = self.location + if not location: + return False + return location.endswith(".egg") + + @property + def installed_with_setuptools_egg_info(self) -> bool: + """Whether this distribution is installed with the ``.egg-info`` format. 
+ + This usually indicates the distribution was installed with setuptools + with an old pip version or with ``single-version-externally-managed``. + + Note that this ensures the metadata store is a directory. distutils can + also install an ``.egg-info``, but as a file, not a directory. This + property is *False* for that case. Also see ``installed_by_distutils``. + """ + info_location = self.info_location + if not info_location: + return False + if not info_location.endswith(".egg-info"): + return False + return pathlib.Path(info_location).is_dir() + + @property + def installed_with_dist_info(self) -> bool: + """Whether this distribution is installed with the "modern format". + + This indicates a "modern" installation, e.g. storing metadata in the + ``.dist-info`` directory. This applies to installations made by + setuptools (but through pip, not directly), or anything using the + standardized build backend interface (PEP 517). + """ + info_location = self.info_location + if not info_location: + return False + if not info_location.endswith(".dist-info"): + return False + return pathlib.Path(info_location).is_dir() + + @property + def canonical_name(self) -> NormalizedName: raise NotImplementedError() @property - def editable(self): - # type: () -> bool + def version(self) -> DistributionVersion: raise NotImplementedError() @property - def local(self): - # type: () -> bool + def setuptools_filename(self) -> str: + """Convert a project name to its setuptools-compatible filename. + + This is a copy of ``pkg_resources.to_filename()`` for compatibility. + """ + return self.raw_name.replace("-", "_") + + @property + def direct_url(self) -> Optional[DirectUrl]: + """Obtain a DirectUrl from this distribution. + + Returns None if the distribution has no `direct_url.json` metadata, + or if `direct_url.json` is invalid. + """ + try: + content = self.read_text(DIRECT_URL_METADATA_NAME) + except FileNotFoundError: + return None + try: + return DirectUrl.from_json(content) + except ( + UnicodeDecodeError, + json.JSONDecodeError, + DirectUrlValidationError, + ) as e: + logger.warning( + "Error parsing %s for %s: %s", + DIRECT_URL_METADATA_NAME, + self.canonical_name, + e, + ) + return None + + @property + def installer(self) -> str: + try: + installer_text = self.read_text("INSTALLER") + except (OSError, ValueError, NoneMetadataError): + return "" # Fail silently if the installer file cannot be read. + for line in installer_text.splitlines(): + cleaned_line = line.strip() + if cleaned_line: + return cleaned_line + return "" + + @property + def requested(self) -> bool: + return self.is_file("REQUESTED") + + @property + def editable(self) -> bool: + return bool(self.editable_project_location) + + @property + def local(self) -> bool: + """Whether the distribution is installed in the current virtual environment. + + Always True if we're not in a virtualenv.
+ """ + if self.installed_location is None: + return False + return is_local(self.installed_location) + + @property + def in_usersite(self) -> bool: + if self.installed_location is None or user_site is None: + return False + return self.installed_location.startswith(normalize_path(user_site)) + + @property + def in_site_packages(self) -> bool: + if self.installed_location is None or site_packages is None: + return False + return self.installed_location.startswith(normalize_path(site_packages)) + + def is_file(self, path: InfoPath) -> bool: + """Check whether an entry in the info directory is a file.""" + raise NotImplementedError() + + def iter_distutils_script_names(self) -> Iterator[str]: + """Find distutils 'scripts' entries metadata. + + If 'scripts' is supplied in ``setup.py``, distutils records those in the + installed distribution's ``scripts`` directory, a file for each script. + """ + raise NotImplementedError() + + def read_text(self, path: InfoPath) -> str: + """Read a file in the info directory. + + :raise FileNotFoundError: If ``path`` does not exist in the directory. + :raise NoneMetadataError: If ``path`` exists in the info directory, but + cannot be read. + """ + raise NotImplementedError() + + def iter_entry_points(self) -> Iterable[BaseEntryPoint]: + raise NotImplementedError() + + def _metadata_impl(self) -> email.message.Message: raise NotImplementedError() + @functools.lru_cache(maxsize=1) + def _metadata_cached(self) -> email.message.Message: + # When we drop python 3.7 support, move this to the metadata property and use + # functools.cached_property instead of lru_cache. + metadata = self._metadata_impl() + self._add_egg_info_requires(metadata) + return metadata + + @property + def metadata(self) -> email.message.Message: + """Metadata of distribution parsed from e.g. METADATA or PKG-INFO. + + This should return an empty message if the metadata file is unavailable. + + :raises NoneMetadataError: If the metadata file is available, but does + not contain valid metadata. + """ + return self._metadata_cached() + + @property + def metadata_dict(self) -> Dict[str, Any]: + """PEP 566 compliant JSON-serializable representation of METADATA or PKG-INFO. + + This should return an empty dict if the metadata file is unavailable. + + :raises NoneMetadataError: If the metadata file is available, but does + not contain valid metadata. + """ + return msg_to_json(self.metadata) + + @property + def metadata_version(self) -> Optional[str]: + """Value of "Metadata-Version:" in distribution metadata, if available.""" + return self.metadata.get("Metadata-Version") + + @property + def raw_name(self) -> str: + """Value of "Name:" in distribution metadata.""" + # The metadata should NEVER be missing the Name: key, but if it somehow + # does, fall back to the known canonical name. + return self.metadata.get("Name", self.canonical_name) + @property - def in_usersite(self): - # type: () -> bool + def requires_python(self) -> SpecifierSet: + """Value of "Requires-Python:" in distribution metadata. + + If the key does not exist or contains an invalid value, an empty + SpecifierSet should be returned. + """ + value = self.metadata.get("Requires-Python") + if value is None: + return SpecifierSet() + try: + # Convert to str to satisfy the type checker; this can be a Header object. 
+ spec = SpecifierSet(str(value)) + except InvalidSpecifier as e: + message = "Package %r has an invalid Requires-Python: %s" + logger.warning(message, self.raw_name, e) + return SpecifierSet() + return spec + + def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requirement]: + """Dependencies of this distribution. + + For modern .dist-info distributions, this is the collection of + "Requires-Dist:" entries in distribution metadata. + """ + raise NotImplementedError() + + def iter_provided_extras(self) -> Iterable[str]: + """Extras provided by this distribution. + + For modern .dist-info distributions, this is the collection of + "Provides-Extra:" entries in distribution metadata. + """ raise NotImplementedError() + def _iter_declared_entries_from_record(self) -> Optional[Iterator[str]]: + try: + text = self.read_text("RECORD") + except FileNotFoundError: + return None + # This extra Path-str cast normalizes entries. + return (str(pathlib.Path(row[0])) for row in csv.reader(text.splitlines())) + + def _iter_declared_entries_from_legacy(self) -> Optional[Iterator[str]]: + try: + text = self.read_text("installed-files.txt") + except FileNotFoundError: + return None + paths = (p for p in text.splitlines(keepends=False) if p) + root = self.location + info = self.info_location + if root is None or info is None: + return paths + try: + info_rel = pathlib.Path(info).relative_to(root) + except ValueError: # info is not relative to root. + return paths + if not info_rel.parts: # info *is* root. + return paths + return ( + _convert_installed_files_path(pathlib.Path(p).parts, info_rel.parts) + for p in paths + ) + + def iter_declared_entries(self) -> Optional[Iterator[str]]: + """Iterate through file entries declared in this distribution. + + For modern .dist-info distributions, this is the files listed in the + ``RECORD`` metadata file. For legacy setuptools distributions, this + comes from ``installed-files.txt``, with entries normalized to be + compatible with the format used by ``RECORD``. + + :return: An iterator for listed entries, or None if the distribution + contains neither ``RECORD`` nor ``installed-files.txt``. + """ + return ( + self._iter_declared_entries_from_record() + or self._iter_declared_entries_from_legacy() + ) + + def _iter_requires_txt_entries(self) -> Iterator[RequiresEntry]: + """Parse a ``requires.txt`` in an egg-info directory. + + This is an INI-ish format where an egg-info stores dependencies. A + section name encodes an extra and/or an environment marker, while each + entry is an arbitrary string (not a key-value pair) representing a + dependency as a requirement string (no markers). + + There is a construct in ``importlib.metadata`` called ``Sectioned`` that + does mostly the same, but the format is currently considered private. + """ + try: + content = self.read_text("requires.txt") + except FileNotFoundError: + return + extra = marker = "" # Section-less entries don't have markers. + for line in content.splitlines(): + line = line.strip() + if not line or line.startswith("#"): # Comment; ignored. + continue + if line.startswith("[") and line.endswith("]"): # A section header.
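+ # (Editorial example: "[socks]" yields extra="socks", marker=""; a header + # like [:python_version < "3.4"] yields extra="" and + # marker='python_version < "3.4"'.)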
+ extra, _, marker = line.strip("[]").partition(":") + continue + yield RequiresEntry(requirement=line, extra=extra, marker=marker) + + def _iter_egg_info_extras(self) -> Iterable[str]: + """Get extras from the egg-info directory.""" + known_extras = {""} + for entry in self._iter_requires_txt_entries(): + if entry.extra in known_extras: + continue + known_extras.add(entry.extra) + yield entry.extra + + def _iter_egg_info_dependencies(self) -> Iterable[str]: + """Get distribution dependencies from the egg-info directory. + + To ease parsing, this converts a legacy dependency entry into a PEP 508 + requirement string. Like ``_iter_requires_txt_entries()``, there is code + in ``importlib.metadata`` that does mostly the same, but does not do + exactly what we need. + + Namely, ``importlib.metadata`` does not normalize the extra name before + putting it into the requirement string, which causes marker comparison + to fail because the dist-info format does normalize. This is consistent in + all currently available PEP 517 backends, although not standardized. + """ + for entry in self._iter_requires_txt_entries(): + if entry.extra and entry.marker: + marker = f'({entry.marker}) and extra == "{safe_extra(entry.extra)}"' + elif entry.extra: + marker = f'extra == "{safe_extra(entry.extra)}"' + elif entry.marker: + marker = entry.marker + else: + marker = "" + if marker: + yield f"{entry.requirement} ; {marker}" + else: + yield entry.requirement + + def _add_egg_info_requires(self, metadata: email.message.Message) -> None: + """Add egg-info requires.txt information to the metadata.""" + if not metadata.get_all("Requires-Dist"): + for dep in self._iter_egg_info_dependencies(): + metadata["Requires-Dist"] = dep + if not metadata.get_all("Provides-Extra"): + for extra in self._iter_egg_info_extras(): + metadata["Provides-Extra"] = extra + class BaseEnvironment: """An environment containing distributions to introspect.""" @classmethod - def default(cls): - # type: () -> BaseEnvironment + def default(cls) -> "BaseEnvironment": raise NotImplementedError() @classmethod - def from_paths(cls, paths): - # type: (Optional[List[str]]) -> BaseEnvironment + def from_paths(cls, paths: Optional[List[str]]) -> "BaseEnvironment": raise NotImplementedError() - def get_distribution(self, name): - # type: (str) -> Optional[BaseDistribution] - """Given a requirement name, return the installed distributions.""" + def get_distribution(self, name: str) -> Optional["BaseDistribution"]: + """Given a requirement name, return the installed distribution. + + The name may not be normalized. The implementation must canonicalize + it for lookup. + """ raise NotImplementedError() - def _iter_distributions(self): - # type: () -> Iterator[BaseDistribution] + def _iter_distributions(self) -> Iterator["BaseDistribution"]: """Iterate through installed distributions. This function should be implemented by subclass, but never called @@ -88,9 +607,8 @@ class BaseEnvironment: """ raise NotImplementedError() - def iter_distributions(self): - # type: () -> Iterator[BaseDistribution] - """Iterate through installed distributions.""" + def iter_all_distributions(self) -> Iterator[BaseDistribution]: + """Iterate through all installed distributions without any filtering.""" for dist in self._iter_distributions(): # Make sure the distribution actually comes from a valid Python # packaging distribution.
@@ -112,15 +630,19 @@ class BaseEnvironment: def iter_installed_distributions( self, - local_only=True, # type: bool - skip=stdlib_pkgs, # type: Container[str] - include_editables=True, # type: bool - editables_only=False, # type: bool - user_only=False, # type: bool - ): - # type: (...) -> Iterator[BaseDistribution] + local_only: bool = True, + skip: Container[str] = stdlib_pkgs, + include_editables: bool = True, + editables_only: bool = False, + user_only: bool = False, + ) -> Iterator[BaseDistribution]: """Return a list of installed distributions. + This is based on ``iter_all_distributions()`` with additional filtering + options. Note that ``iter_installed_distributions()`` without arguments + is *not* equal to ``iter_all_distributions()``, since some of the + configurations exclude packages by default. + :param local_only: If True (default), only return installations local to the current virtualenv, if in a virtualenv. :param skip: An iterable of canonicalized project names to ignore; @@ -130,7 +652,7 @@ class BaseEnvironment: :param user_only: If True, only report installations in the user site directory. """ - it = self.iter_distributions() + it = self.iter_all_distributions() if local_only: it = (d for d in it if d.local) if not include_editables: @@ -140,3 +662,27 @@ class BaseEnvironment: if user_only: it = (d for d in it if d.in_usersite) return (d for d in it if d.canonical_name not in skip) + + +class Wheel(Protocol): + location: str + + def as_zipfile(self) -> zipfile.ZipFile: + raise NotImplementedError() + + +class FilesystemWheel(Wheel): + def __init__(self, location: str) -> None: + self.location = location + + def as_zipfile(self) -> zipfile.ZipFile: + return zipfile.ZipFile(self.location, allowZip64=True) + + +class MemoryWheel(Wheel): + def __init__(self, location: str, stream: IO[bytes]) -> None: + self.location = location + self.stream = stream + + def as_zipfile(self) -> zipfile.ZipFile: + return zipfile.ZipFile(self.stream, allowZip64=True) diff --git a/env/Lib/site-packages/pip/_internal/metadata/pkg_resources.py b/env/Lib/site-packages/pip/_internal/metadata/pkg_resources.py index f39a39ebebfb913cc6fc30c307d53ffca1052dba..f330ef12a2c5ea0a4adbecbeea389741479d5eb4 100644 --- a/env/Lib/site-packages/pip/_internal/metadata/pkg_resources.py +++ b/env/Lib/site-packages/pip/_internal/metadata/pkg_resources.py @@ -1,104 +1,253 @@ +import email.message +import email.parser +import logging +import os import zipfile -from typing import Iterator, List, Optional +from typing import Collection, Iterable, Iterator, List, Mapping, NamedTuple, Optional from pip._vendor import pkg_resources -from pip._vendor.packaging.utils import canonicalize_name +from pip._vendor.packaging.requirements import Requirement +from pip._vendor.packaging.utils import NormalizedName, canonicalize_name from pip._vendor.packaging.version import parse as parse_version -from pip._internal.utils import misc # TODO: Move definition here.
-from pip._internal.utils.packaging import get_installer -from pip._internal.utils.wheel import pkg_resources_distribution_for_wheel +from pip._internal.exceptions import InvalidWheel, NoneMetadataError, UnsupportedWheel +from pip._internal.utils.egg_link import egg_link_path_from_location +from pip._internal.utils.misc import display_path, normalize_path +from pip._internal.utils.wheel import parse_wheel, read_wheel_metadata_file -from .base import BaseDistribution, BaseEnvironment, DistributionVersion +from .base import ( + BaseDistribution, + BaseEntryPoint, + BaseEnvironment, + DistributionVersion, + InfoPath, + Wheel, +) + +logger = logging.getLogger(__name__) + + +class EntryPoint(NamedTuple): + name: str + value: str + group: str + + +class InMemoryMetadata: + """IMetadataProvider that reads metadata files from a dictionary. + + This also maps metadata decoding exceptions to our internal exception type. + """ + + def __init__(self, metadata: Mapping[str, bytes], wheel_name: str) -> None: + self._metadata = metadata + self._wheel_name = wheel_name + + def has_metadata(self, name: str) -> bool: + return name in self._metadata + + def get_metadata(self, name: str) -> str: + try: + return self._metadata[name].decode() + except UnicodeDecodeError as e: + # Augment the default error with the origin of the file. + raise UnsupportedWheel( + f"Error decoding metadata for {self._wheel_name}: {e} in {name} file" + ) + + def get_metadata_lines(self, name: str) -> Iterable[str]: + return pkg_resources.yield_lines(self.get_metadata(name)) + + def metadata_isdir(self, name: str) -> bool: + return False + + def metadata_listdir(self, name: str) -> List[str]: + return [] + + def run_script(self, script_name: str, namespace: str) -> None: + pass class Distribution(BaseDistribution): - def __init__(self, dist): - # type: (pkg_resources.Distribution) -> None + def __init__(self, dist: pkg_resources.Distribution) -> None: self._dist = dist @classmethod - def from_wheel(cls, path, name): - # type: (str, str) -> Distribution - with zipfile.ZipFile(path, allowZip64=True) as zf: - dist = pkg_resources_distribution_for_wheel(zf, name, path) + def from_directory(cls, directory: str) -> BaseDistribution: + dist_dir = directory.rstrip(os.sep) + + # Build a PathMetadata object, from path to metadata. :wink: + base_dir, dist_dir_name = os.path.split(dist_dir) + metadata = pkg_resources.PathMetadata(base_dir, dist_dir) + + # Determine the correct Distribution object type. 
+ if dist_dir.endswith(".egg-info"): + dist_cls = pkg_resources.Distribution + dist_name = os.path.splitext(dist_dir_name)[0] + else: + assert dist_dir.endswith(".dist-info") + dist_cls = pkg_resources.DistInfoDistribution + dist_name = os.path.splitext(dist_dir_name)[0].split("-")[0] + + dist = dist_cls(base_dir, project_name=dist_name, metadata=metadata) + return cls(dist) + + @classmethod + def from_metadata_file_contents( + cls, + metadata_contents: bytes, + filename: str, + project_name: str, + ) -> BaseDistribution: + metadata_dict = { + "METADATA": metadata_contents, + } + dist = pkg_resources.DistInfoDistribution( + location=filename, + metadata=InMemoryMetadata(metadata_dict, filename), + project_name=project_name, + ) + return cls(dist) + + @classmethod + def from_wheel(cls, wheel: Wheel, name: str) -> BaseDistribution: + try: + with wheel.as_zipfile() as zf: + info_dir, _ = parse_wheel(zf, name) + metadata_dict = { + path.split("/", 1)[-1]: read_wheel_metadata_file(zf, path) + for path in zf.namelist() + if path.startswith(f"{info_dir}/") + } + except zipfile.BadZipFile as e: + raise InvalidWheel(wheel.location, name) from e + except UnsupportedWheel as e: + raise UnsupportedWheel(f"{name} has an invalid wheel, {e}") + dist = pkg_resources.DistInfoDistribution( + location=wheel.location, + metadata=InMemoryMetadata(metadata_dict, wheel.location), + project_name=name, + ) return cls(dist) @property - def location(self): - # type: () -> Optional[str] + def location(self) -> Optional[str]: return self._dist.location @property - def metadata_version(self): - # type: () -> Optional[str] - for line in self._dist.get_metadata_lines(self._dist.PKG_INFO): - if line.lower().startswith("metadata-version:"): - return line.split(":", 1)[-1].strip() - return None + def installed_location(self) -> Optional[str]: + egg_link = egg_link_path_from_location(self.raw_name) + if egg_link: + location = egg_link + elif self.location: + location = self.location + else: + return None + return normalize_path(location) @property - def canonical_name(self): - # type: () -> str - return canonicalize_name(self._dist.project_name) + def info_location(self) -> Optional[str]: + return self._dist.egg_info @property - def version(self): - # type: () -> DistributionVersion - return parse_version(self._dist.version) + def installed_by_distutils(self) -> bool: + # A distutils-installed distribution is provided by FileMetadata. This + # provider has a "path" attribute not present anywhere else. Not the + # best introspection logic, but pip has been doing this for a long time. 
+ try: + return bool(self._dist._provider.path) + except AttributeError: + return False @property - def installer(self): - # type: () -> str - return get_installer(self._dist) + def canonical_name(self) -> NormalizedName: + return canonicalize_name(self._dist.project_name) @property - def editable(self): - # type: () -> bool - return misc.dist_is_editable(self._dist) + def version(self) -> DistributionVersion: + return parse_version(self._dist.version) - @property - def local(self): - # type: () -> bool - return misc.dist_is_local(self._dist) + def is_file(self, path: InfoPath) -> bool: + return self._dist.has_metadata(str(path)) - @property - def in_usersite(self): - # type: () -> bool - return misc.dist_in_usersite(self._dist) + def iter_distutils_script_names(self) -> Iterator[str]: + yield from self._dist.metadata_listdir("scripts") + + def read_text(self, path: InfoPath) -> str: + name = str(path) + if not self._dist.has_metadata(name): + raise FileNotFoundError(name) + content = self._dist.get_metadata(name) + if content is None: + raise NoneMetadataError(self, name) + return content + + def iter_entry_points(self) -> Iterable[BaseEntryPoint]: + for group, entries in self._dist.get_entry_map().items(): + for name, entry_point in entries.items(): + name, _, value = str(entry_point).partition("=") + yield EntryPoint(name=name.strip(), value=value.strip(), group=group) + + def _metadata_impl(self) -> email.message.Message: + """ + :raises NoneMetadataError: if the distribution reports `has_metadata()` + True but `get_metadata()` returns None. + """ + if isinstance(self._dist, pkg_resources.DistInfoDistribution): + metadata_name = "METADATA" + else: + metadata_name = "PKG-INFO" + try: + metadata = self.read_text(metadata_name) + except FileNotFoundError: + if self.location: + displaying_path = display_path(self.location) + else: + displaying_path = repr(self.location) + logger.warning("No metadata found in %s", displaying_path) + metadata = "" + feed_parser = email.parser.FeedParser() + feed_parser.feed(metadata) + return feed_parser.close() + + def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requirement]: + if extras: # pkg_resources raises on invalid extras, so we sanitize. + extras = frozenset(extras).intersection(self._dist.extras) + return self._dist.requires(extras) + + def iter_provided_extras(self) -> Iterable[str]: + return self._dist.extras class Environment(BaseEnvironment): - def __init__(self, ws): - # type: (pkg_resources.WorkingSet) -> None + def __init__(self, ws: pkg_resources.WorkingSet) -> None: self._ws = ws @classmethod - def default(cls): - # type: () -> BaseEnvironment + def default(cls) -> BaseEnvironment: return cls(pkg_resources.working_set) @classmethod - def from_paths(cls, paths): - # type: (Optional[List[str]]) -> BaseEnvironment + def from_paths(cls, paths: Optional[List[str]]) -> BaseEnvironment: return cls(pkg_resources.WorkingSet(paths)) - def _search_distribution(self, name): - # type: (str) -> Optional[BaseDistribution] + def _iter_distributions(self) -> Iterator[BaseDistribution]: + for dist in self._ws: + yield Distribution(dist) + + def _search_distribution(self, name: str) -> Optional[BaseDistribution]: """Find a distribution matching the ``name`` in the environment. This searches from *all* distributions available in the environment, to match the behavior of ``pkg_resources.get_distribution()``. 
""" canonical_name = canonicalize_name(name) - for dist in self.iter_distributions(): + for dist in self.iter_all_distributions(): if dist.canonical_name == canonical_name: return dist return None - def get_distribution(self, name): - # type: (str) -> Optional[BaseDistribution] - + def get_distribution(self, name: str) -> Optional[BaseDistribution]: # Search the distribution by looking through the working set. dist = self._search_distribution(name) if dist: @@ -119,8 +268,3 @@ class Environment(BaseEnvironment): except pkg_resources.DistributionNotFound: return None return self._search_distribution(name) - - def _iter_distributions(self): - # type: () -> Iterator[BaseDistribution] - for dist in self._ws: - yield Distribution(dist) diff --git a/env/Lib/site-packages/pip/_internal/models/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/models/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index dcf6e70dd4b33c0f8fc88f364e6993e5bd8bd334..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/models/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/models/__pycache__/candidate.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/models/__pycache__/candidate.cpython-39.pyc deleted file mode 100644 index cfafa5c29d3746c7b5bcc5e4e07212cdcae43d18..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/models/__pycache__/candidate.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/models/__pycache__/direct_url.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/models/__pycache__/direct_url.cpython-39.pyc deleted file mode 100644 index b9a0186dc4d74627482e3280c892f7ab28f87fa7..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/models/__pycache__/direct_url.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/models/__pycache__/format_control.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/models/__pycache__/format_control.cpython-39.pyc deleted file mode 100644 index 042aa0e879b593edd2b294db2605eabf3e41cd6d..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/models/__pycache__/format_control.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/models/__pycache__/index.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/models/__pycache__/index.cpython-39.pyc deleted file mode 100644 index 4c259d1af5dadcee851e9537a750d766df23152f..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/models/__pycache__/index.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/models/__pycache__/link.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/models/__pycache__/link.cpython-39.pyc deleted file mode 100644 index 05cb7364d3452159544d3bf6f73ba5f3edb1c51a..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/models/__pycache__/link.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/models/__pycache__/scheme.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/models/__pycache__/scheme.cpython-39.pyc deleted file mode 100644 index 8b62e151b755803d9720ca204d857e1b6aac09c2..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/models/__pycache__/scheme.cpython-39.pyc and /dev/null differ 
diff --git a/env/Lib/site-packages/pip/_internal/models/__pycache__/search_scope.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/models/__pycache__/search_scope.cpython-39.pyc deleted file mode 100644 index 980cbaa7b8aded7542d6bd21504ca764aa9dd1d7..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/models/__pycache__/search_scope.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/models/__pycache__/selection_prefs.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/models/__pycache__/selection_prefs.cpython-39.pyc deleted file mode 100644 index c6bc3292e8a33c9ecd1ff1f32ce95f22c0d04374..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/models/__pycache__/selection_prefs.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/models/__pycache__/target_python.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/models/__pycache__/target_python.cpython-39.pyc deleted file mode 100644 index 4f07b5657e4314610b53751575176d986ceb0970..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/models/__pycache__/target_python.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/models/__pycache__/wheel.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/models/__pycache__/wheel.cpython-39.pyc deleted file mode 100644 index 269f3b7fa5ee1adc13f1fb969e28ed26d3f3f0d7..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/models/__pycache__/wheel.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/models/candidate.py b/env/Lib/site-packages/pip/_internal/models/candidate.py index 3b91704a21c49e6e5e9f300d0884e66364e6ce18..a4963aec6388c27c3beb064f0a730af200380aee 100644 --- a/env/Lib/site-packages/pip/_internal/models/candidate.py +++ b/env/Lib/site-packages/pip/_internal/models/candidate.py @@ -5,30 +5,30 @@ from pip._internal.utils.models import KeyBasedCompareMixin class InstallationCandidate(KeyBasedCompareMixin): - """Represents a potential "candidate" for installation. 
- """ + """Represents a potential "candidate" for installation.""" __slots__ = ["name", "version", "link"] - def __init__(self, name, version, link): - # type: (str, str, Link) -> None + def __init__(self, name: str, version: str, link: Link) -> None: self.name = name self.version = parse_version(version) self.link = link super().__init__( key=(self.name, self.version, self.link), - defining_class=InstallationCandidate + defining_class=InstallationCandidate, ) - def __repr__(self): - # type: () -> str + def __repr__(self) -> str: return "<InstallationCandidate({!r}, {!r}, {!r})>".format( - self.name, self.version, self.link, + self.name, + self.version, + self.link, ) - def __str__(self): - # type: () -> str - return '{!r} candidate (version {} at {})'.format( - self.name, self.version, self.link, + def __str__(self) -> str: + return "{!r} candidate (version {} at {})".format( + self.name, + self.version, + self.link, ) diff --git a/env/Lib/site-packages/pip/_internal/models/direct_url.py b/env/Lib/site-packages/pip/_internal/models/direct_url.py index 345dbaf109a25d57d4806b98e2ff6172da880d63..e219d73849bbbfc556be108fac2ae619042bce1a 100644 --- a/env/Lib/site-packages/pip/_internal/models/direct_url.py +++ b/env/Lib/site-packages/pip/_internal/models/direct_url.py @@ -22,8 +22,9 @@ class DirectUrlValidationError(Exception): pass -def _get(d, expected_type, key, default=None): - # type: (Dict[str, Any], Type[T], str, Optional[T]) -> Optional[T] +def _get( + d: Dict[str, Any], expected_type: Type[T], key: str, default: Optional[T] = None +) -> Optional[T]: """Get value from dictionary and verify expected type.""" if key not in d: return default @@ -37,16 +38,16 @@ def _get(d, expected_type, key, default=None): return value -def _get_required(d, expected_type, key, default=None): - # type: (Dict[str, Any], Type[T], str, Optional[T]) -> T +def _get_required( + d: Dict[str, Any], expected_type: Type[T], key: str, default: Optional[T] = None +) -> T: value = _get(d, expected_type, key, default) if value is None: raise DirectUrlValidationError(f"{key} must have a value") return value -def _exactly_one_of(infos): - # type: (Iterable[Optional[InfoType]]) -> InfoType +def _exactly_one_of(infos: Iterable[Optional["InfoType"]]) -> "InfoType": infos = [info for info in infos if info is not None] if not infos: raise DirectUrlValidationError( @@ -60,8 +61,7 @@ def _exactly_one_of(infos): return infos[0] -def _filter_none(**kwargs): - # type: (Any) -> Dict[str, Any] +def _filter_none(**kwargs: Any) -> Dict[str, Any]: """Make dict excluding None values.""" return {k: v for k, v in kwargs.items() if v is not None} @@ -71,39 +71,29 @@ class VcsInfo: def __init__( self, - vcs, # type: str - commit_id, # type: str - requested_revision=None, # type: Optional[str] - resolved_revision=None, # type: Optional[str] - resolved_revision_type=None, # type: Optional[str] - ): + vcs: str, + commit_id: str, + requested_revision: Optional[str] = None, + ) -> None: self.vcs = vcs self.requested_revision = requested_revision self.commit_id = commit_id - self.resolved_revision = resolved_revision - self.resolved_revision_type = resolved_revision_type @classmethod - def _from_dict(cls, d): - # type: (Optional[Dict[str, Any]]) -> Optional[VcsInfo] + def _from_dict(cls, d: Optional[Dict[str, Any]]) -> Optional["VcsInfo"]: if d is None: return None return cls( vcs=_get_required(d, str, "vcs"), commit_id=_get_required(d, str, "commit_id"), requested_revision=_get(d, str, "requested_revision"), - resolved_revision=_get(d, str, 
"resolved_revision"), - resolved_revision_type=_get(d, str, "resolved_revision_type"), ) - def _to_dict(self): - # type: () -> Dict[str, Any] + def _to_dict(self) -> Dict[str, Any]: return _filter_none( vcs=self.vcs, requested_revision=self.requested_revision, commit_id=self.commit_id, - resolved_revision=self.resolved_revision, - resolved_revision_type=self.resolved_revision_type, ) @@ -112,20 +102,43 @@ class ArchiveInfo: def __init__( self, - hash=None, # type: Optional[str] - ): + hash: Optional[str] = None, + hashes: Optional[Dict[str, str]] = None, + ) -> None: + # set hashes before hash, since the hash setter will further populate hashes + self.hashes = hashes self.hash = hash + @property + def hash(self) -> Optional[str]: + return self._hash + + @hash.setter + def hash(self, value: Optional[str]) -> None: + if value is not None: + # Auto-populate the hashes key to upgrade to the new format automatically. + # We don't back-populate the legacy hash key from hashes. + try: + hash_name, hash_value = value.split("=", 1) + except ValueError: + raise DirectUrlValidationError( + f"invalid archive_info.hash format: {value!r}" + ) + if self.hashes is None: + self.hashes = {hash_name: hash_value} + elif hash_name not in self.hashes: + self.hashes = self.hashes.copy() + self.hashes[hash_name] = hash_value + self._hash = value + @classmethod - def _from_dict(cls, d): - # type: (Optional[Dict[str, Any]]) -> Optional[ArchiveInfo] + def _from_dict(cls, d: Optional[Dict[str, Any]]) -> Optional["ArchiveInfo"]: if d is None: return None - return cls(hash=_get(d, str, "hash")) + return cls(hash=_get(d, str, "hash"), hashes=_get(d, dict, "hashes")) - def _to_dict(self): - # type: () -> Dict[str, Any] - return _filter_none(hash=self.hash) + def _to_dict(self) -> Dict[str, Any]: + return _filter_none(hash=self.hash, hashes=self.hashes) class DirInfo: @@ -133,21 +146,17 @@ class DirInfo: def __init__( self, - editable=False, # type: bool - ): + editable: bool = False, + ) -> None: self.editable = editable @classmethod - def _from_dict(cls, d): - # type: (Optional[Dict[str, Any]]) -> Optional[DirInfo] + def _from_dict(cls, d: Optional[Dict[str, Any]]) -> Optional["DirInfo"]: if d is None: return None - return cls( - editable=_get_required(d, bool, "editable", default=False) - ) + return cls(editable=_get_required(d, bool, "editable", default=False)) - def _to_dict(self): - # type: () -> Dict[str, Any] + def _to_dict(self) -> Dict[str, Any]: return _filter_none(editable=self.editable or None) @@ -155,26 +164,24 @@ InfoType = Union[ArchiveInfo, DirInfo, VcsInfo] class DirectUrl: - def __init__( self, - url, # type: str - info, # type: InfoType - subdirectory=None, # type: Optional[str] - ): + url: str, + info: InfoType, + subdirectory: Optional[str] = None, + ) -> None: self.url = url self.info = info self.subdirectory = subdirectory - def _remove_auth_from_netloc(self, netloc): - # type: (str) -> str + def _remove_auth_from_netloc(self, netloc: str) -> str: if "@" not in netloc: return netloc user_pass, netloc_no_user_pass = netloc.split("@", 1) if ( - isinstance(self.info, VcsInfo) and - self.info.vcs == "git" and - user_pass == "git" + isinstance(self.info, VcsInfo) + and self.info.vcs == "git" + and user_pass == "git" ): return netloc if ENV_VAR_RE.match(user_pass): @@ -182,8 +189,7 @@ class DirectUrl: return netloc_no_user_pass @property - def redacted_url(self): - # type: () -> str + def redacted_url(self) -> str: """url with user:password part removed unless it is formed with environment variables as 
specified in PEP 610, or it is ``git`` in the case of a git URL. @@ -195,13 +201,11 @@ class DirectUrl: ) return surl - def validate(self): - # type: () -> None + def validate(self) -> None: self.from_dict(self.to_dict()) @classmethod - def from_dict(cls, d): - # type: (Dict[str, Any]) -> DirectUrl + def from_dict(cls, d: Dict[str, Any]) -> "DirectUrl": return DirectUrl( url=_get_required(d, str, "url"), subdirectory=_get(d, str, "subdirectory"), @@ -214,8 +218,7 @@ class DirectUrl: ), ) - def to_dict(self): - # type: () -> Dict[str, Any] + def to_dict(self) -> Dict[str, Any]: res = _filter_none( url=self.redacted_url, subdirectory=self.subdirectory, @@ -224,10 +227,11 @@ class DirectUrl: return res @classmethod - def from_json(cls, s): - # type: (str) -> DirectUrl + def from_json(cls, s: str) -> "DirectUrl": return cls.from_dict(json.loads(s)) - def to_json(self): - # type: () -> str + def to_json(self) -> str: return json.dumps(self.to_dict(), sort_keys=True) + + def is_local_editable(self) -> bool: + return isinstance(self.info, DirInfo) and self.info.editable diff --git a/env/Lib/site-packages/pip/_internal/models/format_control.py b/env/Lib/site-packages/pip/_internal/models/format_control.py index cf262af2918a8aad09ef3a2b939209ea910c8594..db3995eac9f9ec2450e0e2d4a18e666c0b178681 100644 --- a/env/Lib/site-packages/pip/_internal/models/format_control.py +++ b/env/Lib/site-packages/pip/_internal/models/format_control.py @@ -6,13 +6,15 @@ from pip._internal.exceptions import CommandError class FormatControl: - """Helper for managing formats from which a package can be installed. - """ + """Helper for managing formats from which a package can be installed.""" __slots__ = ["no_binary", "only_binary"] - def __init__(self, no_binary=None, only_binary=None): - # type: (Optional[Set[str]], Optional[Set[str]]) -> None + def __init__( + self, + no_binary: Optional[Set[str]] = None, + only_binary: Optional[Set[str]] = None, + ) -> None: if no_binary is None: no_binary = set() if only_binary is None: @@ -21,66 +23,58 @@ class FormatControl: self.no_binary = no_binary self.only_binary = only_binary - def __eq__(self, other): - # type: (object) -> bool + def __eq__(self, other: object) -> bool: if not isinstance(other, self.__class__): return NotImplemented if self.__slots__ != other.__slots__: return False - return all( - getattr(self, k) == getattr(other, k) - for k in self.__slots__ - ) + return all(getattr(self, k) == getattr(other, k) for k in self.__slots__) - def __repr__(self): - # type: () -> str + def __repr__(self) -> str: return "{}({}, {})".format( - self.__class__.__name__, - self.no_binary, - self.only_binary + self.__class__.__name__, self.no_binary, self.only_binary ) @staticmethod - def handle_mutual_excludes(value, target, other): - # type: (str, Set[str], Set[str]) -> None - if value.startswith('-'): + def handle_mutual_excludes(value: str, target: Set[str], other: Set[str]) -> None: + if value.startswith("-"): raise CommandError( "--no-binary / --only-binary option requires 1 argument." 
) - new = value.split(',') - while ':all:' in new: + new = value.split(",") + while ":all:" in new: other.clear() target.clear() - target.add(':all:') - del new[:new.index(':all:') + 1] + target.add(":all:") + del new[: new.index(":all:") + 1] # Without a none, we want to discard everything as :all: covers it - if ':none:' not in new: + if ":none:" not in new: return for name in new: - if name == ':none:': + if name == ":none:": target.clear() continue name = canonicalize_name(name) other.discard(name) target.add(name) - def get_allowed_formats(self, canonical_name): - # type: (str) -> FrozenSet[str] + def get_allowed_formats(self, canonical_name: str) -> FrozenSet[str]: result = {"binary", "source"} if canonical_name in self.only_binary: - result.discard('source') + result.discard("source") elif canonical_name in self.no_binary: - result.discard('binary') - elif ':all:' in self.only_binary: - result.discard('source') - elif ':all:' in self.no_binary: - result.discard('binary') + result.discard("binary") + elif ":all:" in self.only_binary: + result.discard("source") + elif ":all:" in self.no_binary: + result.discard("binary") return frozenset(result) - def disallow_binaries(self): - # type: () -> None + def disallow_binaries(self) -> None: self.handle_mutual_excludes( - ':all:', self.no_binary, self.only_binary, + ":all:", + self.no_binary, + self.only_binary, ) diff --git a/env/Lib/site-packages/pip/_internal/models/index.py b/env/Lib/site-packages/pip/_internal/models/index.py index b148abb4250d40a4eef021674f1f1e5021f40c9a..b94c32511f0cda2363bfc4f29c9c8bfcc7101f9b 100644 --- a/env/Lib/site-packages/pip/_internal/models/index.py +++ b/env/Lib/site-packages/pip/_internal/models/index.py @@ -2,33 +2,27 @@ import urllib.parse class PackageIndex: - """Represents a Package Index and provides easier access to endpoints - """ + """Represents a Package Index and provides easier access to endpoints""" - __slots__ = ['url', 'netloc', 'simple_url', 'pypi_url', - 'file_storage_domain'] + __slots__ = ["url", "netloc", "simple_url", "pypi_url", "file_storage_domain"] - def __init__(self, url, file_storage_domain): - # type: (str, str) -> None + def __init__(self, url: str, file_storage_domain: str) -> None: super().__init__() self.url = url self.netloc = urllib.parse.urlsplit(url).netloc - self.simple_url = self._url_for_path('simple') - self.pypi_url = self._url_for_path('pypi') + self.simple_url = self._url_for_path("simple") + self.pypi_url = self._url_for_path("pypi") # This is part of a temporary hack used to block installs of PyPI # packages which depend on external urls only necessary until PyPI can # block such packages themselves self.file_storage_domain = file_storage_domain - def _url_for_path(self, path): - # type: (str) -> str + def _url_for_path(self, path: str) -> str: return urllib.parse.urljoin(self.url, path) -PyPI = PackageIndex( - 'https://pypi.org/', file_storage_domain='files.pythonhosted.org' -) +PyPI = PackageIndex("https://pypi.org/", file_storage_domain="files.pythonhosted.org") TestPyPI = PackageIndex( - 'https://test.pypi.org/', file_storage_domain='test-files.pythonhosted.org' + "https://test.pypi.org/", file_storage_domain="test-files.pythonhosted.org" ) diff --git a/env/Lib/site-packages/pip/_internal/models/link.py b/env/Lib/site-packages/pip/_internal/models/link.py index 86d0be4079d4de63359c14a230dbebd35470468f..4453519ad0202281cfa53b3ca2a0282a9b0a1799 100644 --- a/env/Lib/site-packages/pip/_internal/models/link.py +++ 
b/env/Lib/site-packages/pip/_internal/models/link.py @@ -1,12 +1,28 @@ +import functools +import itertools +import logging import os import posixpath import re import urllib.parse -from typing import TYPE_CHECKING, Optional, Tuple, Union +from dataclasses import dataclass +from typing import ( + TYPE_CHECKING, + Any, + Dict, + List, + Mapping, + NamedTuple, + Optional, + Tuple, + Union, +) +from pip._internal.utils.deprecation import deprecated from pip._internal.utils.filetypes import WHEEL_EXTENSION from pip._internal.utils.hashes import Hashes from pip._internal.utils.misc import ( + pairwise, redact_auth_from_url, split_auth_from_netloc, splitext, @@ -15,34 +31,182 @@ from pip._internal.utils.models import KeyBasedCompareMixin from pip._internal.utils.urls import path_to_url, url_to_path if TYPE_CHECKING: - from pip._internal.index.collector import HTMLPage + from pip._internal.index.collector import IndexContent +logger = logging.getLogger(__name__) -class Link(KeyBasedCompareMixin): - """Represents a parsed link from a Package Index's simple URL + + +# Order matters, earlier hashes take precedence over later hashes for what +# we will pick to use. +_SUPPORTED_HASHES = ("sha512", "sha384", "sha256", "sha224", "sha1", "md5") + + +@dataclass(frozen=True) +class LinkHash: + """Links to content may have embedded hash values. This class parses those. + + `name` must be a member of `_SUPPORTED_HASHES`. + + This class can be converted to and from `ArchiveInfo`. While ArchiveInfo intends to + be JSON-serializable to conform to PEP 610, this class contains the logic for + parsing a hash name and value for correctness, and then checking whether that hash + conforms to a schema with `.is_hash_allowed()`.""" + + name: str + value: str + + _hash_url_fragment_re = re.compile( + # NB: we do not validate that the second group (.*) is a valid hex + # digest. Instead, we simply keep that string in this class, and then check it + # against Hashes when hash-checking is needed. This is easier to debug than + # proactively discarding an invalid hex digest, as we handle incorrect hashes + # and malformed hashes in the same place. + r"[#&]({choices})=([^&]*)".format( + choices="|".join(re.escape(hash_name) for hash_name in _SUPPORTED_HASHES) + ), + ) + + def __post_init__(self) -> None: + assert self.name in _SUPPORTED_HASHES + + @classmethod + @functools.lru_cache(maxsize=None) + def find_hash_url_fragment(cls, url: str) -> Optional["LinkHash"]: + """Search a string for a checksum algorithm name and encoded output value.""" + match = cls._hash_url_fragment_re.search(url) + if match is None: + return None + name, value = match.groups() + return cls(name=name, value=value) + + def as_dict(self) -> Dict[str, str]: + return {self.name: self.value} + + def as_hashes(self) -> Hashes: + """Return a Hashes instance which checks only for the current hash.""" + return Hashes({self.name: [self.value]}) + + def is_hash_allowed(self, hashes: Optional[Hashes]) -> bool: + """ + Return True if the current hash is allowed by `hashes`. + """ + if hashes is None: + return False + return hashes.is_hash_allowed(self.name, hex_digest=self.value)
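A self-contained sketch of the URL-fragment parsing that ``LinkHash.find_hash_url_fragment()`` performs above, using the same regex construction (simplified and illustrative; ``_FRAGMENT_RE`` and the sample URL are hypothetical):

import re

_SUPPORTED = ("sha512", "sha384", "sha256", "sha224", "sha1", "md5")
_FRAGMENT_RE = re.compile(
    r"[#&]({})=([^&]*)".format("|".join(map(re.escape, _SUPPORTED)))
)

url = "https://example.com/pkg-1.0.tar.gz#sha256=deadbeef"
match = _FRAGMENT_RE.search(url)
if match is not None:
    name, value = match.groups()
    print(name, value)  # sha256 deadbeef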
+ """ + if hashes is None: + return False + return hashes.is_hash_allowed(self.name, hex_digest=self.value) + + +@dataclass(frozen=True) +class MetadataFile: + """Information about a core metadata file associated with a distribution.""" + + hashes: Optional[Dict[str, str]] + + def __post_init__(self) -> None: + if self.hashes is not None: + assert all(name in _SUPPORTED_HASHES for name in self.hashes) + + +def supported_hashes(hashes: Optional[Dict[str, str]]) -> Optional[Dict[str, str]]: + # Remove any unsupported hash types from the mapping. If this leaves no + # supported hashes, return None + if hashes is None: + return None + hashes = {n: v for n, v in hashes.items() if n in _SUPPORTED_HASHES} + if not hashes: + return None + return hashes + + +def _clean_url_path_part(part: str) -> str: """ + Clean a "part" of a URL path (i.e. after splitting on "@" characters). + """ + # We unquote prior to quoting to make sure nothing is double quoted. + return urllib.parse.quote(urllib.parse.unquote(part)) + + +def _clean_file_url_path(part: str) -> str: + """ + Clean the first part of a URL path that corresponds to a local + filesystem path (i.e. the first part after splitting on "@" characters). + """ + # We unquote prior to quoting to make sure nothing is double quoted. + # Also, on Windows the path part might contain a drive letter which + # should not be quoted. On Linux where drive letters do not + # exist, the colon should be quoted. We rely on urllib.request + # to do the right thing here. + return urllib.request.pathname2url(urllib.request.url2pathname(part)) + + +# percent-encoded: / +_reserved_chars_re = re.compile("(@|%2F)", re.IGNORECASE) + + +def _clean_url_path(path: str, is_local_path: bool) -> str: + """ + Clean the path portion of a URL. + """ + if is_local_path: + clean_func = _clean_file_url_path + else: + clean_func = _clean_url_path_part + + # Split on the reserved characters prior to cleaning so that + # revision strings in VCS URLs are properly preserved. + parts = _reserved_chars_re.split(path) + + cleaned_parts = [] + for to_clean, reserved in pairwise(itertools.chain(parts, [""])): + cleaned_parts.append(clean_func(to_clean)) + # Normalize %xx escapes (e.g. %2f -> %2F) + cleaned_parts.append(reserved.upper()) + + return "".join(cleaned_parts) + + +def _ensure_quoted_url(url: str) -> str: + """ + Make sure a link is fully quoted. + For example, if ' ' occurs in the URL, it will be replaced with "%20", + and without double-quoting other characters. + """ + # Split the URL into parts according to the general structure + # `scheme://netloc/path;parameters?query#fragment`. + result = urllib.parse.urlparse(url) + # If the netloc is empty, then the URL refers to a local filesystem path. + is_local_path = not result.netloc + path = _clean_url_path(result.path, is_local_path=is_local_path) + return urllib.parse.urlunparse(result._replace(path=path)) + + +class Link(KeyBasedCompareMixin): + """Represents a parsed link from a Package Index's simple URL""" __slots__ = [ "_parsed_url", "_url", + "_hashes", "comes_from", "requires_python", "yanked_reason", + "metadata_file_data", "cache_link_parsing", + "egg_fragment", ] def __init__( self, - url, # type: str - comes_from=None, # type: Optional[Union[str, HTMLPage]] - requires_python=None, # type: Optional[str] - yanked_reason=None, # type: Optional[str] - cache_link_parsing=True, # type: bool - ): - # type: (...) 
-> None + url: str, + comes_from: Optional[Union[str, "IndexContent"]] = None, + requires_python: Optional[str] = None, + yanked_reason: Optional[str] = None, + metadata_file_data: Optional[MetadataFile] = None, + cache_link_parsing: bool = True, + hashes: Optional[Mapping[str, str]] = None, + ) -> None: """ :param url: url of the resource pointed to (href of the link) - :param comes_from: instance of HTMLPage where the link was found, + :param comes_from: instance of IndexContent where the link was found, or string. :param requires_python: String containing the `Requires-Python` metadata field, specified in PEP 345. This may be specified by @@ -54,15 +218,23 @@ class Link(KeyBasedCompareMixin): a simple repository HTML link. If the file has been yanked but no reason was provided, this should be the empty string. See PEP 592 for more information and the specification. + :param metadata_file_data: the metadata attached to the file, or None if + no such metadata is provided. This argument, if not None, indicates + that a separate metadata file exists, and also optionally supplies + hashes for that file. :param cache_link_parsing: A flag that is used elsewhere to determine - whether resources retrieved from this link - should be cached. PyPI index urls should - generally have this set to False, for - example. + whether resources retrieved from this link should be cached. PyPI + URLs should generally have this set to False, for example. + :param hashes: A mapping of hash names to digests to allow us to + determine the validity of a download. """ + # The comes_from, requires_python, and metadata_file_data arguments are + # only used by classmethods of this class, and are not used in client + # code directly. + # url can be a UNC windows share - if url.startswith('\\\\'): + if url.startswith("\\\\"): url = path_to_url(url) self._parsed_url = urllib.parse.urlsplit(url) @@ -70,39 +242,148 @@ class Link(KeyBasedCompareMixin): # trying to set a new value. self._url = url + link_hash = LinkHash.find_hash_url_fragment(url) + hashes_from_link = {} if link_hash is None else link_hash.as_dict() + if hashes is None: + self._hashes = hashes_from_link + else: + self._hashes = {**hashes, **hashes_from_link} + self.comes_from = comes_from self.requires_python = requires_python if requires_python else None self.yanked_reason = yanked_reason + self.metadata_file_data = metadata_file_data super().__init__(key=url, defining_class=Link) self.cache_link_parsing = cache_link_parsing + self.egg_fragment = self._egg_fragment() + + @classmethod + def from_json( + cls, + file_data: Dict[str, Any], + page_url: str, + ) -> Optional["Link"]: + """ + Convert a PyPI JSON document from a simple repository page into a Link. + """ + file_url = file_data.get("url") + if file_url is None: + return None - def __str__(self): - # type: () -> str + url = _ensure_quoted_url(urllib.parse.urljoin(page_url, file_url)) + pyrequire = file_data.get("requires-python") + yanked_reason = file_data.get("yanked") + hashes = file_data.get("hashes", {}) + + # PEP 714: Indexes must use the name core-metadata, but + # clients should support the old name as a fallback for compatibility. + metadata_info = file_data.get("core-metadata") + if metadata_info is None: + metadata_info = file_data.get("dist-info-metadata") + + # The metadata info value may be a boolean, or a dict of hashes.
+ if isinstance(metadata_info, dict): + # The file exists, and hashes have been supplied + metadata_file_data = MetadataFile(supported_hashes(metadata_info)) + elif metadata_info: + # The file exists, but there are no hashes + metadata_file_data = MetadataFile(None) + else: + # False or not present: the file does not exist + metadata_file_data = None + + # The Link.yanked_reason expects an empty string instead of a boolean. + if yanked_reason and not isinstance(yanked_reason, str): + yanked_reason = "" + # The Link.yanked_reason expects None instead of False. + elif not yanked_reason: + yanked_reason = None + + return cls( + url, + comes_from=page_url, + requires_python=pyrequire, + yanked_reason=yanked_reason, + hashes=hashes, + metadata_file_data=metadata_file_data, + ) + + @classmethod + def from_element( + cls, + anchor_attribs: Dict[str, Optional[str]], + page_url: str, + base_url: str, + ) -> Optional["Link"]: + """ + Convert an anchor element's attributes in a simple repository page to a Link. + """ + href = anchor_attribs.get("href") + if not href: + return None + + url = _ensure_quoted_url(urllib.parse.urljoin(base_url, href)) + pyrequire = anchor_attribs.get("data-requires-python") + yanked_reason = anchor_attribs.get("data-yanked") + + # PEP 714: Indexes must use the name data-core-metadata, but + # clients should support the old name as a fallback for compatibility. + metadata_info = anchor_attribs.get("data-core-metadata") + if metadata_info is None: + metadata_info = anchor_attribs.get("data-dist-info-metadata") + # The metadata info value may be the string "true", or a string of + # the form "hashname=hashval" + if metadata_info == "true": + # The file exists, but there are no hashes + metadata_file_data = MetadataFile(None) + elif metadata_info is None: + # The file does not exist + metadata_file_data = None + else: + # The file exists, and hashes have been supplied + hashname, sep, hashval = metadata_info.partition("=") + if sep == "=": + metadata_file_data = MetadataFile(supported_hashes({hashname: hashval})) + else: + # Error - data is wrong. Treat as no hashes supplied. 
+ logger.debug( + "Index returned invalid data-dist-info-metadata value: %s", + metadata_info, + ) + metadata_file_data = MetadataFile(None) + + return cls( + url, + comes_from=page_url, + requires_python=pyrequire, + yanked_reason=yanked_reason, + metadata_file_data=metadata_file_data, + ) + + def __str__(self) -> str: if self.requires_python: - rp = f' (requires-python:{self.requires_python})' + rp = f" (requires-python:{self.requires_python})" else: - rp = '' + rp = "" if self.comes_from: - return '{} (from {}){}'.format( - redact_auth_from_url(self._url), self.comes_from, rp) + return "{} (from {}){}".format( + redact_auth_from_url(self._url), self.comes_from, rp + ) else: return redact_auth_from_url(str(self._url)) - def __repr__(self): - # type: () -> str - return f'<Link {self}>' + def __repr__(self) -> str: + return f"<Link {self}>" @property - def url(self): - # type: () -> str + def url(self) -> str: return self._url @property - def filename(self): - # type: () -> str - path = self.path.rstrip('/') + def filename(self) -> str: + path = self.path.rstrip("/") name = posixpath.basename(path) if not name: # Make sure we don't leak auth information if the netloc @@ -111,138 +392,190 @@ class Link(KeyBasedCompareMixin): return netloc name = urllib.parse.unquote(name) - assert name, f'URL {self._url!r} produced no filename' + assert name, f"URL {self._url!r} produced no filename" return name @property - def file_path(self): - # type: () -> str + def file_path(self) -> str: return url_to_path(self.url) @property - def scheme(self): - # type: () -> str + def scheme(self) -> str: return self._parsed_url.scheme @property - def netloc(self): - # type: () -> str + def netloc(self) -> str: """ This can contain auth information. """ return self._parsed_url.netloc @property - def path(self): - # type: () -> str + def path(self) -> str: return urllib.parse.unquote(self._parsed_url.path) - def splitext(self): - # type: () -> Tuple[str, str] - return splitext(posixpath.basename(self.path.rstrip('/'))) + def splitext(self) -> Tuple[str, str]: + return splitext(posixpath.basename(self.path.rstrip("/"))) @property - def ext(self): - # type: () -> str + def ext(self) -> str: return self.splitext()[1] @property - def url_without_fragment(self): - # type: () -> str + def url_without_fragment(self) -> str: scheme, netloc, path, query, fragment = self._parsed_url - return urllib.parse.urlunsplit((scheme, netloc, path, query, None)) + return urllib.parse.urlunsplit((scheme, netloc, path, query, "")) - _egg_fragment_re = re.compile(r'[#&]egg=([^&]*)') + _egg_fragment_re = re.compile(r"[#&]egg=([^&]*)") - @property - def egg_fragment(self): - # type: () -> Optional[str] + # Per PEP 508. + _project_name_re = re.compile( + r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$", re.IGNORECASE + ) + + def _egg_fragment(self) -> Optional[str]: match = self._egg_fragment_re.search(self._url) if not match: return None - return match.group(1) - _subdirectory_fragment_re = re.compile(r'[#&]subdirectory=([^&]*)') + # An egg fragment looks like a PEP 508 project name, along with + # an optional extras specifier. Anything else is invalid. 
+ project_name = match.group(1) + if not self._project_name_re.match(project_name): + deprecated( + reason=f"{self} contains an egg fragment with a non-PEP 508 name", + replacement="to use the req @ url syntax, and remove the egg fragment", + gone_in="25.0", + issue=11617, + ) + + return project_name + + _subdirectory_fragment_re = re.compile(r"[#&]subdirectory=([^&]*)") @property - def subdirectory_fragment(self): - # type: () -> Optional[str] + def subdirectory_fragment(self) -> Optional[str]: match = self._subdirectory_fragment_re.search(self._url) if not match: return None return match.group(1) - _hash_re = re.compile( - r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)' - ) + def metadata_link(self) -> Optional["Link"]: + """Return a link to the associated core metadata file (if any).""" + if self.metadata_file_data is None: + return None + metadata_url = f"{self.url_without_fragment}.metadata" + if self.metadata_file_data.hashes is None: + return Link(metadata_url) + return Link(metadata_url, hashes=self.metadata_file_data.hashes) + + def as_hashes(self) -> Hashes: + return Hashes({k: [v] for k, v in self._hashes.items()}) @property - def hash(self): - # type: () -> Optional[str] - match = self._hash_re.search(self._url) - if match: - return match.group(2) - return None + def hash(self) -> Optional[str]: + return next(iter(self._hashes.values()), None) @property - def hash_name(self): - # type: () -> Optional[str] - match = self._hash_re.search(self._url) - if match: - return match.group(1) - return None + def hash_name(self) -> Optional[str]: + return next(iter(self._hashes), None) @property - def show_url(self): - # type: () -> str - return posixpath.basename(self._url.split('#', 1)[0].split('?', 1)[0]) + def show_url(self) -> str: + return posixpath.basename(self._url.split("#", 1)[0].split("?", 1)[0]) @property - def is_file(self): - # type: () -> bool - return self.scheme == 'file' + def is_file(self) -> bool: + return self.scheme == "file" - def is_existing_dir(self): - # type: () -> bool + def is_existing_dir(self) -> bool: return self.is_file and os.path.isdir(self.file_path) @property - def is_wheel(self): - # type: () -> bool + def is_wheel(self) -> bool: return self.ext == WHEEL_EXTENSION @property - def is_vcs(self): - # type: () -> bool + def is_vcs(self) -> bool: from pip._internal.vcs import vcs return self.scheme in vcs.all_schemes @property - def is_yanked(self): - # type: () -> bool + def is_yanked(self) -> bool: return self.yanked_reason is not None @property - def has_hash(self): - # type: () -> bool - return self.hash_name is not None + def has_hash(self) -> bool: + return bool(self._hashes) - def is_hash_allowed(self, hashes): - # type: (Optional[Hashes]) -> bool + def is_hash_allowed(self, hashes: Optional[Hashes]) -> bool: """ - Return True if the link has a hash and it is allowed. + Return True if the link has a hash and it is allowed by `hashes`. """ - if hashes is None or not self.has_hash: + if hashes is None: return False - # Assert non-None so mypy knows self.hash_name and self.hash are str. - assert self.hash_name is not None - assert self.hash is not None + return any(hashes.is_hash_allowed(k, v) for k, v in self._hashes.items()) + + +class _CleanResult(NamedTuple): + """Convert link for equivalency check. + + This is used in the resolver to check whether two URL-specified requirements + likely point to the same distribution and can be considered equivalent. This + equivalency logic avoids comparing URLs literally, which can be too strict + (e.g. 
"a=1&b=2" vs "b=2&a=1") and produce conflicts unexpecting to users. + + Currently this does three things: + + 1. Drop the basic auth part. This is technically wrong since a server can + serve different content based on auth, but if it does that, it is even + impossible to guarantee two URLs without auth are equivalent, since + the user can input different auth information when prompted. So the + practical solution is to assume the auth doesn't affect the response. + 2. Parse the query to avoid the ordering issue. Note that ordering under the + same key in the query are NOT cleaned; i.e. "a=1&a=2" and "a=2&a=1" are + still considered different. + 3. Explicitly drop most of the fragment part, except ``subdirectory=`` and + hash values, since it should have no impact the downloaded content. Note + that this drops the "egg=" part historically used to denote the requested + project (and extras), which is wrong in the strictest sense, but too many + people are supplying it inconsistently to cause superfluous resolution + conflicts, so we choose to also ignore them. + """ - return hashes.is_hash_allowed(self.hash_name, hex_digest=self.hash) + parsed: urllib.parse.SplitResult + query: Dict[str, List[str]] + subdirectory: str + hashes: Dict[str, str] + + +def _clean_link(link: Link) -> _CleanResult: + parsed = link._parsed_url + netloc = parsed.netloc.rsplit("@", 1)[-1] + # According to RFC 8089, an empty host in file: means localhost. + if parsed.scheme == "file" and not netloc: + netloc = "localhost" + fragment = urllib.parse.parse_qs(parsed.fragment) + if "egg" in fragment: + logger.debug("Ignoring egg= fragment in %s", link) + try: + # If there are multiple subdirectory values, use the first one. + # This matches the behavior of Link.subdirectory_fragment. + subdirectory = fragment["subdirectory"][0] + except (IndexError, KeyError): + subdirectory = "" + # If there are multiple hash values under the same algorithm, use the + # first one. This matches the behavior of Link.hash_value. + hashes = {k: fragment[k][0] for k in _SUPPORTED_HASHES if k in fragment} + return _CleanResult( + parsed=parsed._replace(netloc=netloc, query="", fragment=""), + query=urllib.parse.parse_qs(parsed.query), + subdirectory=subdirectory, + hashes=hashes, + ) -# TODO: Relax this comparison logic to ignore, for example, fragments. -def links_equivalent(link1, link2): - # type: (Link, Link) -> bool - return link1 == link2 +@functools.lru_cache(maxsize=None) +def links_equivalent(link1: Link, link2: Link) -> bool: + return _clean_link(link1) == _clean_link(link2) diff --git a/env/Lib/site-packages/pip/_internal/models/scheme.py b/env/Lib/site-packages/pip/_internal/models/scheme.py index 697cd19b47827f6888fbc9d94cec06cccb17a33c..f51190ac60354d90eb2aef4b04c484f8517275c2 100644 --- a/env/Lib/site-packages/pip/_internal/models/scheme.py +++ b/env/Lib/site-packages/pip/_internal/models/scheme.py @@ -6,7 +6,7 @@ https://docs.python.org/3/install/index.html#alternate-installation. 
""" -SCHEME_KEYS = ['platlib', 'purelib', 'headers', 'scripts', 'data'] +SCHEME_KEYS = ["platlib", "purelib", "headers", "scripts", "data"] class Scheme: @@ -18,12 +18,12 @@ class Scheme: def __init__( self, - platlib, # type: str - purelib, # type: str - headers, # type: str - scripts, # type: str - data, # type: str - ): + platlib: str, + purelib: str, + headers: str, + scripts: str, + data: str, + ) -> None: self.platlib = platlib self.purelib = purelib self.headers = headers diff --git a/env/Lib/site-packages/pip/_internal/models/search_scope.py b/env/Lib/site-packages/pip/_internal/models/search_scope.py index a3f0a5c0f873f0a6a20885afe60f90e7ddb2eb55..fe61e8116b71e073351939ed7a499ee752398f1c 100644 --- a/env/Lib/site-packages/pip/_internal/models/search_scope.py +++ b/env/Lib/site-packages/pip/_internal/models/search_scope.py @@ -20,15 +20,15 @@ class SearchScope: Encapsulates the locations that pip is configured to search. """ - __slots__ = ["find_links", "index_urls"] + __slots__ = ["find_links", "index_urls", "no_index"] @classmethod def create( cls, - find_links, # type: List[str] - index_urls, # type: List[str] - ): - # type: (...) -> SearchScope + find_links: List[str], + index_urls: List[str], + no_index: bool, + ) -> "SearchScope": """ Create a SearchScope object after normalizing the `find_links`. """ @@ -37,9 +37,9 @@ class SearchScope: # it and if it exists, use the normalized version. # This is deliberately conservative - it might be fine just to # blindly normalize anything starting with a ~... - built_find_links = [] # type: List[str] + built_find_links: List[str] = [] for link in find_links: - if link.startswith('~'): + if link.startswith("~"): new_link = normalize_path(link) if os.path.exists(new_link): link = new_link @@ -50,35 +50,35 @@ class SearchScope: if not has_tls(): for link in itertools.chain(index_urls, built_find_links): parsed = urllib.parse.urlparse(link) - if parsed.scheme == 'https': + if parsed.scheme == "https": logger.warning( - 'pip is configured with locations that require ' - 'TLS/SSL, however the ssl module in Python is not ' - 'available.' + "pip is configured with locations that require " + "TLS/SSL, however the ssl module in Python is not " + "available." ) break return cls( find_links=built_find_links, index_urls=index_urls, + no_index=no_index, ) def __init__( self, - find_links, # type: List[str] - index_urls, # type: List[str] - ): - # type: (...) 
-> None + find_links: List[str], + index_urls: List[str], + no_index: bool, + ) -> None: self.find_links = find_links self.index_urls = index_urls + self.no_index = no_index - def get_formatted_locations(self): - # type: () -> str + def get_formatted_locations(self) -> str: lines = [] redacted_index_urls = [] if self.index_urls and self.index_urls != [PyPI.simple_url]: for url in self.index_urls: - redacted_index_url = redact_auth_from_url(url) # Parse the URL @@ -91,41 +91,42 @@ class SearchScope: # exceptions for malformed URLs if not purl.scheme and not purl.netloc: logger.warning( - 'The index url "%s" seems invalid, ' - 'please provide a scheme.', redacted_index_url) + 'The index url "%s" seems invalid, please provide a scheme.', + redacted_index_url, + ) redacted_index_urls.append(redacted_index_url) - lines.append('Looking in indexes: {}'.format( - ', '.join(redacted_index_urls))) + lines.append( + "Looking in indexes: {}".format(", ".join(redacted_index_urls)) + ) if self.find_links: lines.append( - 'Looking in links: {}'.format(', '.join( - redact_auth_from_url(url) for url in self.find_links)) + "Looking in links: {}".format( + ", ".join(redact_auth_from_url(url) for url in self.find_links) + ) ) - return '\n'.join(lines) + return "\n".join(lines) - def get_index_urls_locations(self, project_name): - # type: (str) -> List[str] + def get_index_urls_locations(self, project_name: str) -> List[str]: """Returns the locations found via self.index_urls Checks the url_name on the main (first in the list) index and use this url_name to produce all locations """ - def mkurl_pypi_url(url): - # type: (str) -> str + def mkurl_pypi_url(url: str) -> str: loc = posixpath.join( - url, - urllib.parse.quote(canonicalize_name(project_name))) + url, urllib.parse.quote(canonicalize_name(project_name)) + ) # For maximum compatibility with easy_install, ensure the path # ends in a trailing slash. Although this isn't in the spec # (and PyPI can handle it without the slash) some other index # implementations might break if they relied on easy_install's # behavior. - if not loc.endswith('/'): - loc = loc + '/' + if not loc.endswith("/"): + loc = loc + "/" return loc return [mkurl_pypi_url(url) for url in self.index_urls] diff --git a/env/Lib/site-packages/pip/_internal/models/selection_prefs.py b/env/Lib/site-packages/pip/_internal/models/selection_prefs.py index edc1cf7995591efaa45a4b7797236073231c5c6f..977bc4caa75c1e76156fa97e2841a01332f6fa47 100644 --- a/env/Lib/site-packages/pip/_internal/models/selection_prefs.py +++ b/env/Lib/site-packages/pip/_internal/models/selection_prefs.py @@ -9,8 +9,13 @@ class SelectionPreferences: and installing files. """ - __slots__ = ['allow_yanked', 'allow_all_prereleases', 'format_control', - 'prefer_binary', 'ignore_requires_python'] + __slots__ = [ + "allow_yanked", + "allow_all_prereleases", + "format_control", + "prefer_binary", + "ignore_requires_python", + ] # Don't include an allow_yanked default value to make sure each call # site considers whether yanked releases are allowed. This also causes @@ -18,13 +23,12 @@ class SelectionPreferences: # people when reading the code. def __init__( self, - allow_yanked, # type: bool - allow_all_prereleases=False, # type: bool - format_control=None, # type: Optional[FormatControl] - prefer_binary=False, # type: bool - ignore_requires_python=None, # type: Optional[bool] - ): - # type: (...) 
-> None + allow_yanked: bool, + allow_all_prereleases: bool = False, + format_control: Optional[FormatControl] = None, + prefer_binary: bool = False, + ignore_requires_python: Optional[bool] = None, + ) -> None: """Create a SelectionPreferences object. :param allow_yanked: Whether files marked as yanked (in the sense diff --git a/env/Lib/site-packages/pip/_internal/models/target_python.py b/env/Lib/site-packages/pip/_internal/models/target_python.py index b91e349f5662f4c1762b2b1e9f7f002c1b54e6bc..744bd7ef58b4870406fcef8cb3b3667548a0ccea 100644 --- a/env/Lib/site-packages/pip/_internal/models/target_python.py +++ b/env/Lib/site-packages/pip/_internal/models/target_python.py @@ -26,12 +26,11 @@ class TargetPython: def __init__( self, - platforms=None, # type: Optional[List[str]] - py_version_info=None, # type: Optional[Tuple[int, ...]] - abis=None, # type: Optional[List[str]] - implementation=None, # type: Optional[str] - ): - # type: (...) -> None + platforms: Optional[List[str]] = None, + py_version_info: Optional[Tuple[int, ...]] = None, + abis: Optional[List[str]] = None, + implementation: Optional[str] = None, + ) -> None: """ :param platforms: A list of strings or None. If None, searches for packages that are supported by the current system. Otherwise, will @@ -54,7 +53,7 @@ class TargetPython: else: py_version_info = normalize_version_info(py_version_info) - py_version = '.'.join(map(str, py_version_info[:2])) + py_version = ".".join(map(str, py_version_info[:2])) self.abis = abis self.implementation = implementation @@ -63,32 +62,29 @@ class TargetPython: self.py_version_info = py_version_info # This is used to cache the return value of get_tags(). - self._valid_tags = None # type: Optional[List[Tag]] + self._valid_tags: Optional[List[Tag]] = None - def format_given(self): - # type: () -> str + def format_given(self) -> str: """ Format the given, non-None attributes for display. """ display_version = None if self._given_py_version_info is not None: - display_version = '.'.join( + display_version = ".".join( str(part) for part in self._given_py_version_info ) key_values = [ - ('platforms', self.platforms), - ('version_info', display_version), - ('abis', self.abis), - ('implementation', self.implementation), + ("platforms", self.platforms), + ("version_info", display_version), + ("abis", self.abis), + ("implementation", self.implementation), ] - return ' '.join( - f'{key}={value!r}' for key, value in key_values - if value is not None + return " ".join( + f"{key}={value!r}" for key, value in key_values if value is not None ) - def get_tags(self): - # type: () -> List[Tag] + def get_tags(self) -> List[Tag]: """ Return the supported PEP 425 tags to check wheel candidates against. diff --git a/env/Lib/site-packages/pip/_internal/models/wheel.py b/env/Lib/site-packages/pip/_internal/models/wheel.py index 0a582b305c900eb8e5ef39236502ec1dc21723b9..a5dc12bdd63163c86f87ce4b5430cdb16d73769d 100644 --- a/env/Lib/site-packages/pip/_internal/models/wheel.py +++ b/env/Lib/site-packages/pip/_internal/models/wheel.py @@ -13,45 +13,39 @@ class Wheel: """A wheel file""" wheel_file_re = re.compile( - r"""^(?P<namever>(?P<name>.+?)-(?P<ver>.*?)) - ((-(?P<build>\d[^-]*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?) + r"""^(?P<namever>(?P<name>[^\s-]+?)-(?P<ver>[^\s-]*?)) + ((-(?P<build>\d[^-]*?))?-(?P<pyver>[^\s-]+?)-(?P<abi>[^\s-]+?)-(?P<plat>[^\s-]+?) 
\.whl|\.dist-info)$""", - re.VERBOSE + re.VERBOSE, ) - def __init__(self, filename): - # type: (str) -> None + def __init__(self, filename: str) -> None: """ :raises InvalidWheelFilename: when the filename is invalid for a wheel """ wheel_info = self.wheel_file_re.match(filename) if not wheel_info: - raise InvalidWheelFilename( - f"{filename} is not a valid wheel filename." - ) + raise InvalidWheelFilename(f"{filename} is not a valid wheel filename.") self.filename = filename - self.name = wheel_info.group('name').replace('_', '-') + self.name = wheel_info.group("name").replace("_", "-") # we'll assume "_" means "-" due to wheel naming scheme # (https://github.com/pypa/pip/issues/1150) - self.version = wheel_info.group('ver').replace('_', '-') - self.build_tag = wheel_info.group('build') - self.pyversions = wheel_info.group('pyver').split('.') - self.abis = wheel_info.group('abi').split('.') - self.plats = wheel_info.group('plat').split('.') + self.version = wheel_info.group("ver").replace("_", "-") + self.build_tag = wheel_info.group("build") + self.pyversions = wheel_info.group("pyver").split(".") + self.abis = wheel_info.group("abi").split(".") + self.plats = wheel_info.group("plat").split(".") # All the tag combinations from this file self.file_tags = { - Tag(x, y, z) for x in self.pyversions - for y in self.abis for z in self.plats + Tag(x, y, z) for x in self.pyversions for y in self.abis for z in self.plats } - def get_formatted_file_tags(self): - # type: () -> List[str] + def get_formatted_file_tags(self) -> List[str]: """Return the wheel's tags as a sorted list of strings.""" return sorted(str(tag) for tag in self.file_tags) - def support_index_min(self, tags): - # type: (List[Tag]) -> int + def support_index_min(self, tags: List[Tag]) -> int: """Return the lowest index that one of the wheel's file_tag combinations achieves in the given list of supported tags. @@ -64,12 +58,16 @@ class Wheel: :raises ValueError: If none of the wheel's file tags match one of the supported tags. """ - return min(tags.index(tag) for tag in self.file_tags if tag in tags) - - def find_most_preferred_tag(self, tags, tag_to_priority): - # type: (List[Tag], Dict[Tag, int]) -> int + try: + return next(i for i, t in enumerate(tags) if t in self.file_tags) + except StopIteration: + raise ValueError() + + def find_most_preferred_tag( + self, tags: List[Tag], tag_to_priority: Dict[Tag, int] + ) -> int: """Return the priority of the most preferred tag that one of the wheel's file - tag combinations acheives in the given list of supported tags using the given + tag combinations achieves in the given list of supported tags using the given tag_to_priority mapping, where lower priorities are more-preferred. This is used in place of support_index_min in some cases in order to avoid @@ -86,8 +84,7 @@ class Wheel: tag_to_priority[tag] for tag in self.file_tags if tag in tag_to_priority ) - def supported(self, tags): - # type: (Iterable[Tag]) -> bool + def supported(self, tags: Iterable[Tag]) -> bool: """Return whether the wheel is compatible with one of the given tags. :param tags: the PEP 425 tags to check the wheel against. 
diff --git a/env/Lib/site-packages/pip/_internal/network/auth.py b/env/Lib/site-packages/pip/_internal/network/auth.py index bd54a5cba99a682ffd502dad9e9279d3cd92d330..94a82fa6618270d3a16f521a0fcf710a15a8aebc 100644 --- a/env/Lib/site-packages/pip/_internal/network/auth.py +++ b/env/Lib/site-packages/pip/_internal/network/auth.py @@ -3,15 +3,24
@@ Contains interface (MultiDomainBasicAuth) and associated glue code for providing credentials in the context of network requests. """ - import logging +import os +import shutil +import subprocess +import sysconfig +import typing import urllib.parse -from typing import Any, Dict, List, Optional, Tuple +from abc import ABC, abstractmethod +from functools import lru_cache +from os.path import commonprefix +from pathlib import Path +from typing import Any, Dict, List, NamedTuple, Optional, Tuple from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth from pip._vendor.requests.models import Request, Response from pip._vendor.requests.utils import get_netrc_auth +from pip._internal.utils.logging import getLogger from pip._internal.utils.misc import ( ask, ask_input, @@ -21,70 +30,256 @@ from pip._internal.utils.misc import ( ) from pip._internal.vcs.versioncontrol import AuthInfo -logger = logging.getLogger(__name__) +logger = getLogger(__name__) + +KEYRING_DISABLED = False + + +class Credentials(NamedTuple): + url: str + username: str + password: str + + +class KeyRingBaseProvider(ABC): + """Keyring base provider interface""" + + has_keyring: bool + + @abstractmethod + def get_auth_info(self, url: str, username: Optional[str]) -> Optional[AuthInfo]: + ... + + @abstractmethod + def save_auth_info(self, url: str, username: str, password: str) -> None: + ... -Credentials = Tuple[str, str, str] -try: - import keyring -except ImportError: - keyring = None -except Exception as exc: - logger.warning( - "Keyring is skipped due to an exception: %s", str(exc), - ) - keyring = None +class KeyRingNullProvider(KeyRingBaseProvider): + """Keyring null provider""" + has_keyring = False -def get_keyring_auth(url, username): - # type: (Optional[str], Optional[str]) -> Optional[AuthInfo] - """Return the tuple auth for a given url from keyring.""" - global keyring - if not url or not keyring: + def get_auth_info(self, url: str, username: Optional[str]) -> Optional[AuthInfo]: return None - try: - try: - get_credential = keyring.get_credential - except AttributeError: - pass - else: + def save_auth_info(self, url: str, username: str, password: str) -> None: + return None + + +class KeyRingPythonProvider(KeyRingBaseProvider): + """Keyring interface which uses locally imported `keyring`""" + + has_keyring = True + + def __init__(self) -> None: + import keyring + + self.keyring = keyring + + def get_auth_info(self, url: str, username: Optional[str]) -> Optional[AuthInfo]: + # Support keyring's get_credential interface which supports getting + # credentials without a username. This is only available for + # keyring>=15.2.0. 
+ if hasattr(self.keyring, "get_credential"): logger.debug("Getting credentials from keyring for %s", url) - cred = get_credential(url, username) + cred = self.keyring.get_credential(url, username) if cred is not None: return cred.username, cred.password return None - if username: + if username is not None: logger.debug("Getting password from keyring for %s", url) - password = keyring.get_password(url, username) + password = self.keyring.get_password(url, username) if password: return username, password + return None + + def save_auth_info(self, url: str, username: str, password: str) -> None: + self.keyring.set_password(url, username, password) + + +class KeyRingCliProvider(KeyRingBaseProvider): + """Provider which uses `keyring` cli + + Instead of calling the keyring package installed alongside pip + we call keyring on the command line which will enable pip to + use which ever installation of keyring is available first in + PATH. + """ + + has_keyring = True + + def __init__(self, cmd: str) -> None: + self.keyring = cmd + + def get_auth_info(self, url: str, username: Optional[str]) -> Optional[AuthInfo]: + # This is the default implementation of keyring.get_credential + # https://github.com/jaraco/keyring/blob/97689324abcf01bd1793d49063e7ca01e03d7d07/keyring/backend.py#L134-L139 + if username is not None: + password = self._get_password(url, username) + if password is not None: + return username, password + return None + + def save_auth_info(self, url: str, username: str, password: str) -> None: + return self._set_password(url, username, password) - except Exception as exc: - logger.warning( - "Keyring is skipped due to an exception: %s", str(exc), + def _get_password(self, service_name: str, username: str) -> Optional[str]: + """Mirror the implementation of keyring.get_password using cli""" + if self.keyring is None: + return None + + cmd = [self.keyring, "get", service_name, username] + env = os.environ.copy() + env["PYTHONIOENCODING"] = "utf-8" + res = subprocess.run( + cmd, + stdin=subprocess.DEVNULL, + stdout=subprocess.PIPE, + env=env, ) - keyring = None - return None + if res.returncode: + return None + return res.stdout.decode("utf-8").strip(os.linesep) + def _set_password(self, service_name: str, username: str, password: str) -> None: + """Mirror the implementation of keyring.set_password using cli""" + if self.keyring is None: + return None + env = os.environ.copy() + env["PYTHONIOENCODING"] = "utf-8" + subprocess.run( + [self.keyring, "set", service_name, username], + input=f"{password}{os.linesep}".encode("utf-8"), + env=env, + check=True, + ) + return None + + +@lru_cache(maxsize=None) +def get_keyring_provider(provider: str) -> KeyRingBaseProvider: + logger.verbose("Keyring provider requested: %s", provider) + + # keyring has previously failed and been disabled + if KEYRING_DISABLED: + provider = "disabled" + if provider in ["import", "auto"]: + try: + impl = KeyRingPythonProvider() + logger.verbose("Keyring provider set: import") + return impl + except ImportError: + pass + except Exception as exc: + # In the event of an unexpected exception + # we should warn the user + msg = "Installed copy of keyring fails with exception %s" + if provider == "auto": + msg = msg + ", trying to find a keyring executable as a fallback" + logger.warning(msg, exc, exc_info=logger.isEnabledFor(logging.DEBUG)) + if provider in ["subprocess", "auto"]: + cli = shutil.which("keyring") + if cli and cli.startswith(sysconfig.get_path("scripts")): + # all code within this function is stolen from 
shutil.which implementation + @typing.no_type_check + def PATH_as_shutil_which_determines_it() -> str: + path = os.environ.get("PATH", None) + if path is None: + try: + path = os.confstr("CS_PATH") + except (AttributeError, ValueError): + # os.confstr() or CS_PATH is not available + path = os.defpath + # bpo-35755: Don't use os.defpath if the PATH environment variable is + # set to an empty string + + return path + + scripts = Path(sysconfig.get_path("scripts")) + + paths = [] + for path in PATH_as_shutil_which_determines_it().split(os.pathsep): + p = Path(path) + try: + if not p.samefile(scripts): + paths.append(path) + except FileNotFoundError: + pass + + path = os.pathsep.join(paths) + + cli = shutil.which("keyring", path=path) + + if cli: + logger.verbose("Keyring provider set: subprocess with executable %s", cli) + return KeyRingCliProvider(cli) + + logger.verbose("Keyring provider set: disabled") + return KeyRingNullProvider() -class MultiDomainBasicAuth(AuthBase): - def __init__(self, prompting=True, index_urls=None): - # type: (bool, Optional[List[str]]) -> None +class MultiDomainBasicAuth(AuthBase): + def __init__( + self, + prompting: bool = True, + index_urls: Optional[List[str]] = None, + keyring_provider: str = "auto", + ) -> None: self.prompting = prompting self.index_urls = index_urls - self.passwords = {} # type: Dict[str, AuthInfo] + self.keyring_provider = keyring_provider # type: ignore[assignment] + self.passwords: Dict[str, AuthInfo] = {} # When the user is prompted to enter credentials and keyring is # available, we will offer to save them. If the user accepts, # this value is set to the credentials they entered. After the # request authenticates, the caller should call # ``save_credentials`` to save these. - self._credentials_to_save = None # type: Optional[Credentials] + self._credentials_to_save: Optional[Credentials] = None + + @property + def keyring_provider(self) -> KeyRingBaseProvider: + return get_keyring_provider(self._keyring_provider) + + @keyring_provider.setter + def keyring_provider(self, provider: str) -> None: + # The free function get_keyring_provider has been decorated with + # functools.cache. If an exception occurs in get_keyring_auth that + # cache will be cleared and keyring disabled, take that into account + # if you want to remove this indirection. + self._keyring_provider = provider + + @property + def use_keyring(self) -> bool: + # We won't use keyring when --no-input is passed unless + # a specific provider is requested because it might require + # user interaction + return self.prompting or self._keyring_provider not in ["auto", "disabled"] + + def _get_keyring_auth( + self, + url: Optional[str], + username: Optional[str], + ) -> Optional[AuthInfo]: + """Return the tuple auth for a given url from keyring.""" + # Do nothing if no url was provided + if not url: + return None + + try: + return self.keyring_provider.get_auth_info(url, username) + except Exception as exc: + logger.warning( + "Keyring is skipped due to an exception: %s", + str(exc), + ) + global KEYRING_DISABLED + KEYRING_DISABLED = True + get_keyring_provider.cache_clear() + return None - def _get_index_url(self, url): - # type: (str) -> Optional[str] + def _get_index_url(self, url: str) -> Optional[str]: """Return the original index URL matching the requested URL. 
Cached or dynamically generated credentials may work against @@ -100,15 +295,45 @@ class MultiDomainBasicAuth(AuthBase): if not url or not self.index_urls: return None - for u in self.index_urls: - prefix = remove_auth_from_url(u).rstrip("/") + "/" - if url.startswith(prefix): - return u - return None + url = remove_auth_from_url(url).rstrip("/") + "/" + parsed_url = urllib.parse.urlsplit(url) - def _get_new_credentials(self, original_url, allow_netrc=True, - allow_keyring=False): - # type: (str, bool, bool) -> AuthInfo + candidates = [] + + for index in self.index_urls: + index = index.rstrip("/") + "/" + parsed_index = urllib.parse.urlsplit(remove_auth_from_url(index)) + if parsed_url == parsed_index: + return index + + if parsed_url.netloc != parsed_index.netloc: + continue + + candidate = urllib.parse.urlsplit(index) + candidates.append(candidate) + + if not candidates: + return None + + candidates.sort( + reverse=True, + key=lambda candidate: commonprefix( + [ + parsed_url.path, + candidate.path, + ] + ).rfind("/"), + ) + + return urllib.parse.urlunsplit(candidates[0]) + + def _get_new_credentials( + self, + original_url: str, + *, + allow_netrc: bool = True, + allow_keyring: bool = False, + ) -> AuthInfo: """Find and return credentials for the specified URL.""" # Split the credentials and netloc from the url. url, netloc, url_user_password = split_auth_netloc_from_url( @@ -147,18 +372,21 @@ class MultiDomainBasicAuth(AuthBase): # If we don't have a password and keyring is available, use it. if allow_keyring: # The index url is more specific than the netloc, so try it first + # fmt: off kr_auth = ( - get_keyring_auth(index_url, username) or - get_keyring_auth(netloc, username) + self._get_keyring_auth(index_url, username) or + self._get_keyring_auth(netloc, username) ) + # fmt: on if kr_auth: logger.debug("Found credentials in keyring for %s", netloc) return kr_auth return username, password - def _get_url_and_credentials(self, original_url): - # type: (str) -> Tuple[str, Optional[str], Optional[str]] + def _get_url_and_credentials( + self, original_url: str + ) -> Tuple[str, Optional[str], Optional[str]]: """Return the credentials to use for the provided URL. If allowed, netrc and keyring may be used to obtain the @@ -170,13 +398,19 @@ class MultiDomainBasicAuth(AuthBase): """ url, netloc, _ = split_auth_netloc_from_url(original_url) - # Use any stored credentials that we have for this netloc - username, password = self.passwords.get(netloc, (None, None)) + # Try to get credentials from original url + username, password = self._get_new_credentials(original_url) - if username is None and password is None: - # No stored credentials. Acquire new credentials without prompting - # the user. (e.g. from netrc, keyring, or the URL itself) - username, password = self._get_new_credentials(original_url) + # If credentials not found, use any stored credentials for this netloc. + # Do this if either the username or the password is missing. + # This accounts for the situation in which the user has specified + # the username in the index url, but the password comes from keyring. + if (username is None or password is None) and netloc in self.passwords: + un, pw = self.passwords[netloc] + # It is possible that the cached credentials are for a different username, + # in which case the cache should be ignored. 
+ if username is None or username == un: + username, password = un, pw if username is not None or password is not None: # Convert the username and password if they're None, so that @@ -191,15 +425,14 @@ class MultiDomainBasicAuth(AuthBase): assert ( # Credentials were found - (username is not None and password is not None) or + (username is not None and password is not None) # Credentials were not found - (username is None and password is None) + or (username is None and password is None) ), f"Could not load credentials from url: {original_url}" return url, username, password - def __call__(self, req): - # type: (Request) -> Request + def __call__(self, req: Request) -> Request: # Get credentials for this request url, username, password = self._get_url_and_credentials(req.url) @@ -216,42 +449,51 @@ class MultiDomainBasicAuth(AuthBase): return req # Factored out to allow for easy patching in tests - def _prompt_for_password(self, netloc): - # type: (str) -> Tuple[Optional[str], Optional[str], bool] - username = ask_input(f"User for {netloc}: ") + def _prompt_for_password( + self, netloc: str + ) -> Tuple[Optional[str], Optional[str], bool]: + username = ask_input(f"User for {netloc}: ") if self.prompting else None if not username: return None, None, False - auth = get_keyring_auth(netloc, username) - if auth and auth[0] is not None and auth[1] is not None: - return auth[0], auth[1], False + if self.use_keyring: + auth = self._get_keyring_auth(netloc, username) + if auth and auth[0] is not None and auth[1] is not None: + return auth[0], auth[1], False password = ask_password("Password: ") return username, password, True # Factored out to allow for easy patching in tests - def _should_save_password_to_keyring(self): - # type: () -> bool - if not keyring: + def _should_save_password_to_keyring(self) -> bool: + if ( + not self.prompting + or not self.use_keyring + or not self.keyring_provider.has_keyring + ): return False return ask("Save credentials to keyring [y/N]: ", ["y", "n"]) == "y" - def handle_401(self, resp, **kwargs): - # type: (Response, **Any) -> Response + def handle_401(self, resp: Response, **kwargs: Any) -> Response: # We only care about 401 responses, anything else we want to just # pass through the actual response if resp.status_code != 401: return resp + username, password = None, None + + # Query the keyring for credentials: + if self.use_keyring: + username, password = self._get_new_credentials( + resp.url, + allow_netrc=False, + allow_keyring=True, + ) + # We are not able to prompt the user so simply return the response - if not self.prompting: + if not self.prompting and not username and not password: return resp parsed = urllib.parse.urlparse(resp.url) - # Query the keyring for credentials: - username, password = self._get_new_credentials(resp.url, - allow_netrc=False, - allow_keyring=True) - # Prompt the user for a new username and password save = False if not username and not password: @@ -264,11 +506,17 @@ class MultiDomainBasicAuth(AuthBase): # Prompt to save the password to keyring if save and self._should_save_password_to_keyring(): - self._credentials_to_save = (parsed.netloc, username, password) + self._credentials_to_save = Credentials( + url=parsed.netloc, + username=username, + password=password, + ) # Consume content and release the original connection to allow our new # request to reuse the same one. - resp.content + # The result of the assignment isn't used, it's just needed to consume + # the content. 
+ _ = resp.content resp.raw.release_conn() # Add our new username and password to the request @@ -287,26 +535,27 @@ class MultiDomainBasicAuth(AuthBase): return new_resp - def warn_on_401(self, resp, **kwargs): - # type: (Response, **Any) -> None + def warn_on_401(self, resp: Response, **kwargs: Any) -> None: """Response callback to warn about incorrect credentials.""" if resp.status_code == 401: logger.warning( - '401 Error, Credentials not correct for %s', resp.request.url, + "401 Error, Credentials not correct for %s", + resp.request.url, ) - def save_credentials(self, resp, **kwargs): - # type: (Response, **Any) -> None + def save_credentials(self, resp: Response, **kwargs: Any) -> None: """Response callback to save credentials on success.""" - assert keyring is not None, "should never reach here without keyring" - if not keyring: - return + assert ( + self.keyring_provider.has_keyring + ), "should never reach here without keyring" creds = self._credentials_to_save self._credentials_to_save = None if creds and resp.status_code < 400: try: - logger.info('Saving credentials to keyring') - keyring.set_password(*creds) + logger.info("Saving credentials to keyring") + self.keyring_provider.save_auth_info( + creds.url, creds.username, creds.password + ) except Exception: - logger.exception('Failed to save credentials') + logger.exception("Failed to save credentials") diff --git a/env/Lib/site-packages/pip/_internal/network/cache.py b/env/Lib/site-packages/pip/_internal/network/cache.py index ce08932a57f7441e5f12069876d90a45a846154b..a81a23985198d2eaa3c25ad1f77924f0fcdb037b 100644 --- a/env/Lib/site-packages/pip/_internal/network/cache.py +++ b/env/Lib/site-packages/pip/_internal/network/cache.py @@ -3,7 +3,7 @@ import os from contextlib import contextmanager -from typing import Iterator, Optional +from typing import Generator, Optional from pip._vendor.cachecontrol.cache import BaseCache from pip._vendor.cachecontrol.caches import FileCache @@ -13,14 +13,12 @@ from pip._internal.utils.filesystem import adjacent_tmp_file, replace from pip._internal.utils.misc import ensure_dir -def is_from_cache(response): - # type: (Response) -> bool +def is_from_cache(response: Response) -> bool: return getattr(response, "from_cache", False) @contextmanager -def suppressed_cache_errors(): - # type: () -> Iterator[None] +def suppressed_cache_errors() -> Generator[None, None, None]: """If we can't access the cache then we can just skip caching and process requests as if caching wasn't enabled. """ @@ -36,14 +34,12 @@ class SafeFileCache(BaseCache): not be accessible or writable. """ - def __init__(self, directory): - # type: (str) -> None + def __init__(self, directory: str) -> None: assert directory is not None, "Cache directory must not be None." super().__init__() self.directory = directory - def _get_cache_path(self, name): - # type: (str) -> str + def _get_cache_path(self, name: str) -> str: # From cachecontrol.caches.file_cache.FileCache._fn, brought into our # class for backwards-compatibility and to avoid using a non-public # method. 
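For context on the comment above: cachecontrol's FileCache._fn hashes the cache key and nests the first few hex characters as directories so that no single directory accumulates too many entries, and SafeFileCache inlines that logic (visible in the next hunk) rather than calling the non-public method. A minimal standalone sketch, assuming the SHA-224 digest that cachecontrol uses:

    import os
    from hashlib import sha224

    def cache_path(directory: str, key: str) -> str:
        hashed = sha224(key.encode()).hexdigest()
        # Five single-character directory levels, then the full digest:
        # <directory>/a/3/f/0/9/a3f09...
        parts = list(hashed[:5]) + [hashed]
        return os.path.join(directory, *parts)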
@@ -51,15 +47,13 @@ class SafeFileCache(BaseCache): parts = list(hashed[:5]) + [hashed] return os.path.join(self.directory, *parts) - def get(self, key): - # type: (str) -> Optional[bytes] + def get(self, key: str) -> Optional[bytes]: path = self._get_cache_path(key) with suppressed_cache_errors(): - with open(path, 'rb') as f: + with open(path, "rb") as f: return f.read() - def set(self, key, value): - # type: (str, bytes) -> None + def set(self, key: str, value: bytes, expires: Optional[int] = None) -> None: path = self._get_cache_path(key) with suppressed_cache_errors(): ensure_dir(os.path.dirname(path)) @@ -69,8 +63,7 @@ class SafeFileCache(BaseCache): replace(f.name, path) - def delete(self, key): - # type: (str) -> None + def delete(self, key: str) -> None: path = self._get_cache_path(key) with suppressed_cache_errors(): os.remove(path) diff --git a/env/Lib/site-packages/pip/_internal/network/download.py b/env/Lib/site-packages/pip/_internal/network/download.py index 1897d99a13fb2b8cce2e2c47d02b3ae4f67041af..79b82a570e5be5ce4f8e4dcc4906da8c18f08ef6 100644 --- a/env/Lib/site-packages/pip/_internal/network/download.py +++ b/env/Lib/site-packages/pip/_internal/network/download.py @@ -1,6 +1,6 @@ """Download files with progress indicators. """ -import cgi +import email.message import logging import mimetypes import os @@ -8,7 +8,7 @@ from typing import Iterable, Optional, Tuple from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response -from pip._internal.cli.progress_bars import DownloadProgressProvider +from pip._internal.cli.progress_bars import get_download_progress_renderer from pip._internal.exceptions import NetworkConnectionError from pip._internal.models.index import PyPI from pip._internal.models.link import Link @@ -20,20 +20,18 @@ from pip._internal.utils.misc import format_size, redact_auth_from_url, splitext logger = logging.getLogger(__name__) -def _get_http_response_size(resp): - # type: (Response) -> Optional[int] +def _get_http_response_size(resp: Response) -> Optional[int]: try: - return int(resp.headers['content-length']) + return int(resp.headers["content-length"]) except (ValueError, KeyError, TypeError): return None def _prepare_download( - resp, # type: Response - link, # type: Link - progress_bar # type: str -): - # type: (...) -> Iterable[bytes] + resp: Response, + link: Link, + progress_bar: str, +) -> Iterable[bytes]: total_length = _get_http_response_size(resp) if link.netloc == PyPI.file_storage_domain: @@ -44,7 +42,7 @@ def _prepare_download( logged_url = redact_auth_from_url(url) if total_length: - logged_url = '{} ({})'.format(logged_url, format_size(total_length)) + logged_url = "{} ({})".format(logged_url, format_size(total_length)) if is_from_cache(resp): logger.info("Using cached %s", logged_url) @@ -67,49 +65,44 @@ def _prepare_download( if not show_progress: return chunks - return DownloadProgressProvider( - progress_bar, max=total_length - )(chunks) + renderer = get_download_progress_renderer(bar_type=progress_bar, size=total_length) + return renderer(chunks) -def sanitize_content_filename(filename): - # type: (str) -> str +def sanitize_content_filename(filename: str) -> str: """ Sanitize the "filename" value from a Content-Disposition header. 
""" return os.path.basename(filename) -def parse_content_disposition(content_disposition, default_filename): - # type: (str, str) -> str +def parse_content_disposition(content_disposition: str, default_filename: str) -> str: """ Parse the "filename" value from a Content-Disposition header, and return the default filename if the result is empty. """ - _type, params = cgi.parse_header(content_disposition) - filename = params.get('filename') + m = email.message.Message() + m["content-type"] = content_disposition + filename = m.get_param("filename") if filename: # We need to sanitize the filename to prevent directory traversal # in case the filename contains ".." path parts. - filename = sanitize_content_filename(filename) + filename = sanitize_content_filename(str(filename)) return filename or default_filename -def _get_http_response_filename(resp, link): - # type: (Response, Link) -> str +def _get_http_response_filename(resp: Response, link: Link) -> str: """Get an ideal filename from the given HTTP response, falling back to the link filename if not provided. """ filename = link.filename # fallback # Have a look at the Content-Disposition header for a better guess - content_disposition = resp.headers.get('content-disposition') + content_disposition = resp.headers.get("content-disposition") if content_disposition: filename = parse_content_disposition(content_disposition, filename) - ext = splitext(filename)[1] # type: Optional[str] + ext: Optional[str] = splitext(filename)[1] if not ext: - ext = mimetypes.guess_extension( - resp.headers.get('content-type', '') - ) + ext = mimetypes.guess_extension(resp.headers.get("content-type", "")) if ext: filename += ext if not ext and link.url != resp.url: @@ -119,9 +112,8 @@ def _get_http_response_filename(resp, link): return filename -def _http_get_download(session, link): - # type: (PipSession, Link) -> Response - target_url = link.url.split('#', 1)[0] +def _http_get_download(session: PipSession, link: Link) -> Response: + target_url = link.url.split("#", 1)[0] resp = session.get(target_url, headers=HEADERS, stream=True) raise_for_status(resp) return resp @@ -130,15 +122,13 @@ def _http_get_download(session, link): class Downloader: def __init__( self, - session, # type: PipSession - progress_bar, # type: str - ): - # type: (...) -> None + session: PipSession, + progress_bar: str, + ) -> None: self._session = session self._progress_bar = progress_bar - def __call__(self, link, location): - # type: (Link, str) -> Tuple[str, str] + def __call__(self, link: Link, location: str) -> Tuple[str, str]: """Download the file given by link into location.""" try: resp = _http_get_download(self._session, link) @@ -153,26 +143,25 @@ class Downloader: filepath = os.path.join(location, filename) chunks = _prepare_download(resp, link, self._progress_bar) - with open(filepath, 'wb') as content_file: + with open(filepath, "wb") as content_file: for chunk in chunks: content_file.write(chunk) - content_type = resp.headers.get('Content-Type', '') + content_type = resp.headers.get("Content-Type", "") return filepath, content_type class BatchDownloader: - def __init__( self, - session, # type: PipSession - progress_bar, # type: str - ): - # type: (...) 
-> None + session: PipSession, + progress_bar: str, + ) -> None: self._session = session self._progress_bar = progress_bar - def __call__(self, links, location): - # type: (Iterable[Link], str) -> Iterable[Tuple[Link, Tuple[str, str]]] + def __call__( + self, links: Iterable[Link], location: str + ) -> Iterable[Tuple[Link, Tuple[str, str]]]: """Download the files given by links into location.""" for link in links: try: @@ -181,7 +170,8 @@ class BatchDownloader: assert e.response is not None logger.critical( "HTTP error %s while getting %s", - e.response.status_code, link, + e.response.status_code, + link, ) raise @@ -189,8 +179,8 @@ class BatchDownloader: filepath = os.path.join(location, filename) chunks = _prepare_download(resp, link, self._progress_bar) - with open(filepath, 'wb') as content_file: + with open(filepath, "wb") as content_file: for chunk in chunks: content_file.write(chunk) - content_type = resp.headers.get('Content-Type', '') + content_type = resp.headers.get("Content-Type", "") yield link, (filepath, content_type) diff --git a/env/Lib/site-packages/pip/_internal/network/lazy_wheel.py b/env/Lib/site-packages/pip/_internal/network/lazy_wheel.py index b877d3b7a67f7a490fa5e6604cd993a07aae2f74..82ec50d5106ff0ac41dd1c03c2a789dbc468c401 100644 --- a/env/Lib/site-packages/pip/_internal/network/lazy_wheel.py +++ b/env/Lib/site-packages/pip/_internal/network/lazy_wheel.py @@ -1,41 +1,40 @@ """Lazy ZIP over HTTP""" -__all__ = ['HTTPRangeRequestUnsupported', 'dist_from_wheel_url'] +__all__ = ["HTTPRangeRequestUnsupported", "dist_from_wheel_url"] from bisect import bisect_left, bisect_right from contextlib import contextmanager from tempfile import NamedTemporaryFile -from typing import Any, Dict, Iterator, List, Optional, Tuple -from zipfile import BadZipfile, ZipFile +from typing import Any, Dict, Generator, List, Optional, Tuple +from zipfile import BadZipFile, ZipFile -from pip._vendor.pkg_resources import Distribution +from pip._vendor.packaging.utils import canonicalize_name from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response +from pip._internal.metadata import BaseDistribution, MemoryWheel, get_wheel_distribution from pip._internal.network.session import PipSession from pip._internal.network.utils import HEADERS, raise_for_status, response_chunks -from pip._internal.utils.wheel import pkg_resources_distribution_for_wheel class HTTPRangeRequestUnsupported(Exception): pass -def dist_from_wheel_url(name, url, session): - # type: (str, str, PipSession) -> Distribution - """Return a pkg_resources.Distribution from the given wheel URL. +def dist_from_wheel_url(name: str, url: str, session: PipSession) -> BaseDistribution: + """Return a distribution object from the given wheel URL. - This uses HTTP range requests to only fetch the potion of the wheel + This uses HTTP range requests to only fetch the portion of the wheel containing metadata, just enough for the object to be constructed. If such requests are not supported, HTTPRangeRequestUnsupported is raised. """ - with LazyZipOverHTTP(url, session) as wheel: + with LazyZipOverHTTP(url, session) as zf: # For read-only ZIP files, ZipFile only needs methods read, # seek, seekable and tell, not the whole IO protocol. - zip_file = ZipFile(wheel) # type: ignore + wheel = MemoryWheel(zf.name, zf) # type: ignore # After context manager exit, wheel.name # is an invalid file by intention. 
- return pkg_resources_distribution_for_wheel(zip_file, name, wheel.name) + return get_wheel_distribution(wheel, canonicalize_name(name)) class LazyZipOverHTTP: @@ -47,51 +46,46 @@ class LazyZipOverHTTP: during initialization. """ - def __init__(self, url, session, chunk_size=CONTENT_CHUNK_SIZE): - # type: (str, PipSession, int) -> None + def __init__( + self, url: str, session: PipSession, chunk_size: int = CONTENT_CHUNK_SIZE + ) -> None: head = session.head(url, headers=HEADERS) raise_for_status(head) assert head.status_code == 200 self._session, self._url, self._chunk_size = session, url, chunk_size - self._length = int(head.headers['Content-Length']) + self._length = int(head.headers["Content-Length"]) self._file = NamedTemporaryFile() self.truncate(self._length) - self._left = [] # type: List[int] - self._right = [] # type: List[int] - if 'bytes' not in head.headers.get('Accept-Ranges', 'none'): - raise HTTPRangeRequestUnsupported('range request is not supported') + self._left: List[int] = [] + self._right: List[int] = [] + if "bytes" not in head.headers.get("Accept-Ranges", "none"): + raise HTTPRangeRequestUnsupported("range request is not supported") self._check_zip() @property - def mode(self): - # type: () -> str + def mode(self) -> str: """Opening mode, which is always rb.""" - return 'rb' + return "rb" @property - def name(self): - # type: () -> str + def name(self) -> str: """Path to the underlying file.""" return self._file.name - def seekable(self): - # type: () -> bool + def seekable(self) -> bool: """Return whether random access is supported, which is True.""" return True - def close(self): - # type: () -> None + def close(self) -> None: """Close the file.""" self._file.close() @property - def closed(self): - # type: () -> bool + def closed(self) -> bool: """Whether the file is closed.""" return self._file.closed - def read(self, size=-1): - # type: (int) -> bytes + def read(self, size: int = -1) -> bytes: """Read up to size bytes from the object and return them. As a convenience, if size is unspecified or -1, @@ -100,18 +94,16 @@ class LazyZipOverHTTP: """ download_size = max(size, self._chunk_size) start, length = self.tell(), self._length - stop = length if size < 0 else min(start+download_size, length) - start = max(0, stop-download_size) - self._download(start, stop-1) + stop = length if size < 0 else min(start + download_size, length) + start = max(0, stop - download_size) + self._download(start, stop - 1) return self._file.read(size) - def readable(self): - # type: () -> bool + def readable(self) -> bool: """Return whether the file is readable, which is True.""" return True - def seek(self, offset, whence=0): - # type: (int, int) -> int + def seek(self, offset: int, whence: int = 0) -> int: """Change stream position and return the new absolute position. Seek to offset relative position indicated by whence: @@ -121,13 +113,11 @@ class LazyZipOverHTTP: """ return self._file.seek(offset, whence) - def tell(self): - # type: () -> int - """Return the current possition.""" + def tell(self) -> int: + """Return the current position.""" return self._file.tell() - def truncate(self, size=None): - # type: (Optional[int]) -> int + def truncate(self, size: Optional[int] = None) -> int: """Resize the stream to the given size in bytes. If size is unspecified resize to the current position. 
@@ -137,23 +127,19 @@ class LazyZipOverHTTP: """ return self._file.truncate(size) - def writable(self): - # type: () -> bool + def writable(self) -> bool: """Return False.""" return False - def __enter__(self): - # type: () -> LazyZipOverHTTP + def __enter__(self) -> "LazyZipOverHTTP": self._file.__enter__() return self - def __exit__(self, *exc): - # type: (*Any) -> Optional[bool] - return self._file.__exit__(*exc) + def __exit__(self, *exc: Any) -> None: + self._file.__exit__(*exc) @contextmanager - def _stay(self): - # type: ()-> Iterator[None] + def _stay(self) -> Generator[None, None, None]: """Return a context manager keeping the position. At the end of the block, seek back to original position. @@ -164,8 +150,7 @@ class LazyZipOverHTTP: finally: self.seek(pos) - def _check_zip(self): - # type: () -> None + def _check_zip(self) -> None: """Check and download until the file is a valid ZIP.""" end = self._length - 1 for start in reversed(range(0, end, self._chunk_size)): @@ -175,23 +160,25 @@ class LazyZipOverHTTP: # For read-only ZIP files, ZipFile only needs # methods read, seek, seekable and tell. ZipFile(self) # type: ignore - except BadZipfile: + except BadZipFile: pass else: break - def _stream_response(self, start, end, base_headers=HEADERS): - # type: (int, int, Dict[str, str]) -> Response + def _stream_response( + self, start: int, end: int, base_headers: Dict[str, str] = HEADERS + ) -> Response: """Return HTTP response to a range request from start to end.""" headers = base_headers.copy() - headers['Range'] = f'bytes={start}-{end}' + headers["Range"] = f"bytes={start}-{end}" # TODO: Get range requests to be correctly cached - headers['Cache-Control'] = 'no-cache' + headers["Cache-Control"] = "no-cache" return self._session.get(self._url, headers=headers, stream=True) - def _merge(self, start, end, left, right): - # type: (int, int, int, int) -> Iterator[Tuple[int, int]] - """Return an iterator of intervals to be fetched. + def _merge( + self, start: int, end: int, left: int, right: int + ) -> Generator[Tuple[int, int], None, None]: + """Return a generator of intervals to be fetched. Args: start (int): Start of needed interval @@ -200,18 +187,17 @@ class LazyZipOverHTTP: right (int): Index after last overlapping downloaded data """ lslice, rslice = self._left[left:right], self._right[left:right] - i = start = min([start]+lslice[:1]) - end = max([end]+rslice[-1:]) + i = start = min([start] + lslice[:1]) + end = max([end] + rslice[-1:]) for j, k in zip(lslice, rslice): if j > i: - yield i, j-1 + yield i, j - 1 i = k + 1 if i <= end: yield i, end self._left[left:right], self._right[left:right] = [start], [end] - def _download(self, start, end): - # type: (int, int) -> None + def _download(self, start: int, end: int) -> None: """Download bytes from start to end inclusively.""" with self._stay(): left = bisect_left(self._right, start) diff --git a/env/Lib/site-packages/pip/_internal/network/session.py b/env/Lib/site-packages/pip/_internal/network/session.py index 4af800f12fed7b4ed5041d49610332e312f21bec..887dc14e796cad0257e5ccfd51ed3a21b7908821 100644 --- a/env/Lib/site-packages/pip/_internal/network/session.py +++ b/env/Lib/site-packages/pip/_internal/network/session.py @@ -2,31 +2,36 @@ network request configuration and behavior. """ -# When mypy runs on Windows the call to distro.linux_distribution() is skipped -# resulting in the failure: -# -# error: unused 'type: ignore' comment -# -# If the upstream module adds typing, this comment should be removed. 
See -# https://github.com/nir0s/distro/pull/269 -# -# mypy: warn-unused-ignores=False - import email.utils +import io import ipaddress import json import logging import mimetypes import os import platform +import shutil +import subprocess import sys import urllib.parse import warnings -from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple, Union +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Generator, + List, + Mapping, + Optional, + Sequence, + Tuple, + Union, +) from pip._vendor import requests, urllib3 -from pip._vendor.cachecontrol import CacheControlAdapter -from pip._vendor.requests.adapters import BaseAdapter, HTTPAdapter +from pip._vendor.cachecontrol import CacheControlAdapter as _BaseCacheControlAdapter +from pip._vendor.requests.adapters import DEFAULT_POOLBLOCK, BaseAdapter +from pip._vendor.requests.adapters import HTTPAdapter as _BaseHTTPAdapter from pip._vendor.requests.models import PreparedRequest, Response from pip._vendor.requests.structures import CaseInsensitiveDict from pip._vendor.urllib3.connectionpool import ConnectionPool @@ -44,6 +49,12 @@ from pip._internal.utils.glibc import libc_ver from pip._internal.utils.misc import build_url_from_netloc, parse_netloc from pip._internal.utils.urls import url_to_path +if TYPE_CHECKING: + from ssl import SSLContext + + from pip._vendor.urllib3.poolmanager import PoolManager + + logger = logging.getLogger(__name__) SecureOrigin = Tuple[str, str, Optional[Union[int, str]]] @@ -53,7 +64,7 @@ SecureOrigin = Tuple[str, str, Optional[Union[int, str]]] warnings.filterwarnings("ignore", category=InsecureRequestWarning) -SECURE_ORIGINS = [ +SECURE_ORIGINS: List[SecureOrigin] = [ # protocol, hostname, port # Taken from Chrome's list of secure origins (See: http://bit.ly/1qrySKC) ("https", "*", "*"), @@ -63,7 +74,7 @@ SECURE_ORIGINS = [ ("file", "*", None), # ssh is always secure. ("ssh", "*", "*"), -] # type: List[SecureOrigin] +] # These are environment variables present when running under various @@ -75,18 +86,17 @@ SECURE_ORIGINS = [ # For more background, see: https://github.com/pypa/pip/issues/5499 CI_ENVIRONMENT_VARIABLES = ( # Azure Pipelines - 'BUILD_BUILDID', + "BUILD_BUILDID", # Jenkins - 'BUILD_ID', + "BUILD_ID", # AppVeyor, CircleCI, Codeship, Gitlab CI, Shippable, Travis CI - 'CI', + "CI", # Explicit environment variable. - 'PIP_IS_CI', + "PIP_IS_CI", ) -def looks_like_ci(): - # type: () -> bool +def looks_like_ci() -> bool: """ Return whether it looks like pip is running under CI. """ @@ -96,48 +106,50 @@ def looks_like_ci(): return any(name in os.environ for name in CI_ENVIRONMENT_VARIABLES) -def user_agent(): - # type: () -> str +def user_agent() -> str: """ Return a string representing the user agent. 
""" - data = { + data: Dict[str, Any] = { "installer": {"name": "pip", "version": __version__}, "python": platform.python_version(), "implementation": { "name": platform.python_implementation(), }, - } # type: Dict[str, Any] + } - if data["implementation"]["name"] == 'CPython': + if data["implementation"]["name"] == "CPython": data["implementation"]["version"] = platform.python_version() - elif data["implementation"]["name"] == 'PyPy': + elif data["implementation"]["name"] == "PyPy": pypy_version_info = sys.pypy_version_info # type: ignore - if pypy_version_info.releaselevel == 'final': + if pypy_version_info.releaselevel == "final": pypy_version_info = pypy_version_info[:3] data["implementation"]["version"] = ".".join( [str(x) for x in pypy_version_info] ) - elif data["implementation"]["name"] == 'Jython': + elif data["implementation"]["name"] == "Jython": # Complete Guess data["implementation"]["version"] = platform.python_version() - elif data["implementation"]["name"] == 'IronPython': + elif data["implementation"]["name"] == "IronPython": # Complete Guess data["implementation"]["version"] = platform.python_version() if sys.platform.startswith("linux"): from pip._vendor import distro - # https://github.com/nir0s/distro/pull/269 - linux_distribution = distro.linux_distribution() # type: ignore - distro_infos = dict(filter( - lambda x: x[1], - zip(["name", "version", "id"], linux_distribution), - )) - libc = dict(filter( - lambda x: x[1], - zip(["lib", "version"], libc_ver()), - )) + linux_distribution = distro.name(), distro.version(), distro.codename() + distro_infos: Dict[str, Any] = dict( + filter( + lambda x: x[1], + zip(["name", "version", "id"], linux_distribution), + ) + ) + libc = dict( + filter( + lambda x: x[1], + zip(["lib", "version"], libc_ver()), + ) + ) if libc: distro_infos["libc"] = libc if distro_infos: @@ -157,12 +169,28 @@ def user_agent(): if has_tls(): import _ssl as ssl + data["openssl_version"] = ssl.OPENSSL_VERSION setuptools_dist = get_default_environment().get_distribution("setuptools") if setuptools_dist is not None: data["setuptools_version"] = str(setuptools_dist.version) + if shutil.which("rustc") is not None: + # If for any reason `rustc --version` fails, silently ignore it + try: + rustc_output = subprocess.check_output( + ["rustc", "--version"], stderr=subprocess.STDOUT, timeout=0.5 + ) + except Exception: + pass + else: + if rustc_output.startswith(b"rustc "): + # The format of `rustc --version` is: + # `b'rustc 1.52.1 (9bc8c42bb 2021-05-09)\n'` + # We extract just the middle (1.52.1) part + data["rustc_version"] = rustc_output.split(b" ")[1].decode() + # Use None rather than False so as not to give the impression that # pip knows it is not being run under CI. Rather, it is a null or # inconclusive result. Also, we include some value rather than no @@ -180,17 +208,15 @@ def user_agent(): class LocalFSAdapter(BaseAdapter): - def send( self, - request, # type: PreparedRequest - stream=False, # type: bool - timeout=None, # type: Optional[Union[float, Tuple[float, float]]] - verify=True, # type: Union[bool, str] - cert=None, # type: Optional[Union[str, Tuple[str, str]]] - proxies=None, # type:Optional[Mapping[str, str]] - ): - # type: (...) 
-> Response + request: PreparedRequest, + stream: bool = False, + timeout: Optional[Union[float, Tuple[float, float]]] = None, + verify: Union[bool, str] = True, + cert: Optional[Union[str, Tuple[str, str]]] = None, + proxies: Optional[Mapping[str, str]] = None, + ) -> Response: pathname = url_to_path(request.url) resp = Response() @@ -200,67 +226,108 @@ class LocalFSAdapter(BaseAdapter): try: stats = os.stat(pathname) except OSError as exc: + # format the exception raised as a io.BytesIO object, + # to return a better error message: resp.status_code = 404 - resp.raw = exc + resp.reason = type(exc).__name__ + resp.raw = io.BytesIO(f"{resp.reason}: {exc}".encode("utf8")) else: modified = email.utils.formatdate(stats.st_mtime, usegmt=True) content_type = mimetypes.guess_type(pathname)[0] or "text/plain" - resp.headers = CaseInsensitiveDict({ - "Content-Type": content_type, - "Content-Length": stats.st_size, - "Last-Modified": modified, - }) + resp.headers = CaseInsensitiveDict( + { + "Content-Type": content_type, + "Content-Length": stats.st_size, + "Last-Modified": modified, + } + ) resp.raw = open(pathname, "rb") resp.close = resp.raw.close return resp - def close(self): - # type: () -> None + def close(self) -> None: pass -class InsecureHTTPAdapter(HTTPAdapter): +class _SSLContextAdapterMixin: + """Mixin to add the ``ssl_context`` constructor argument to HTTP adapters. + + The additional argument is forwarded directly to the pool manager. This allows us + to dynamically decide what SSL store to use at runtime, which is used to implement + the optional ``truststore`` backend. + """ + + def __init__( + self, + *, + ssl_context: Optional["SSLContext"] = None, + **kwargs: Any, + ) -> None: + self._ssl_context = ssl_context + super().__init__(**kwargs) + + def init_poolmanager( + self, + connections: int, + maxsize: int, + block: bool = DEFAULT_POOLBLOCK, + **pool_kwargs: Any, + ) -> "PoolManager": + if self._ssl_context is not None: + pool_kwargs.setdefault("ssl_context", self._ssl_context) + return super().init_poolmanager( # type: ignore[misc] + connections=connections, + maxsize=maxsize, + block=block, + **pool_kwargs, + ) + + +class HTTPAdapter(_SSLContextAdapterMixin, _BaseHTTPAdapter): + pass + +class CacheControlAdapter(_SSLContextAdapterMixin, _BaseCacheControlAdapter): + pass + + +class InsecureHTTPAdapter(HTTPAdapter): def cert_verify( self, - conn, # type: ConnectionPool - url, # type: str - verify, # type: Union[bool, str] - cert, # type: Optional[Union[str, Tuple[str, str]]] - ): - # type: (...) -> None + conn: ConnectionPool, + url: str, + verify: Union[bool, str], + cert: Optional[Union[str, Tuple[str, str]]], + ) -> None: super().cert_verify(conn=conn, url=url, verify=False, cert=cert) class InsecureCacheControlAdapter(CacheControlAdapter): - def cert_verify( self, - conn, # type: ConnectionPool - url, # type: str - verify, # type: Union[bool, str] - cert, # type: Optional[Union[str, Tuple[str, str]]] - ): - # type: (...) -> None + conn: ConnectionPool, + url: str, + verify: Union[bool, str], + cert: Optional[Union[str, Tuple[str, str]]], + ) -> None: super().cert_verify(conn=conn, url=url, verify=False, cert=cert) class PipSession(requests.Session): - - timeout = None # type: Optional[int] + timeout: Optional[int] = None def __init__( self, - *args, # type: Any - retries=0, # type: int - cache=None, # type: Optional[str] - trusted_hosts=(), # type: Sequence[str] - index_urls=None, # type: Optional[List[str]] - **kwargs, # type: Any - ): - # type: (...) 
-> None + *args: Any, + retries: int = 0, + cache: Optional[str] = None, + trusted_hosts: Sequence[str] = (), + index_urls: Optional[List[str]] = None, + ssl_context: Optional["SSLContext"] = None, + **kwargs: Any, + ) -> None: """ :param trusted_hosts: Domains not to emit warnings for when not using HTTPS. @@ -269,7 +336,7 @@ class PipSession(requests.Session): # Namespace the attribute with "pip_" just in case to prevent # possible conflicts with the base class. - self.pip_trusted_origins = [] # type: List[Tuple[str, Optional[int]]] + self.pip_trusted_origins: List[Tuple[str, Optional[int]]] = [] # Attach our User Agent to the request self.headers["User-Agent"] = user_agent() @@ -283,7 +350,6 @@ class PipSession(requests.Session): # Set the total number of retries that a particular request can # have. total=retries, - # A 503 error from PyPI typically means that the Fastly -> Origin # connection got interrupted in some way. A 503 error in general # is typically considered a transient error so we'll go ahead and @@ -291,7 +357,6 @@ class PipSession(requests.Session): # A 500 may indicate transient error in Amazon S3 # A 520 or 527 - may indicate transient error in CloudFlare status_forcelist=[500, 503, 520, 527], - # Add a small amount of back off between failed requests in # order to prevent hammering the service. backoff_factor=0.25, @@ -313,13 +378,14 @@ class PipSession(requests.Session): secure_adapter = CacheControlAdapter( cache=SafeFileCache(cache), max_retries=retries, + ssl_context=ssl_context, ) self._trusted_host_adapter = InsecureCacheControlAdapter( cache=SafeFileCache(cache), max_retries=retries, ) else: - secure_adapter = HTTPAdapter(max_retries=retries) + secure_adapter = HTTPAdapter(max_retries=retries, ssl_context=ssl_context) self._trusted_host_adapter = insecure_adapter self.mount("https://", secure_adapter) @@ -331,16 +397,16 @@ class PipSession(requests.Session): for host in trusted_hosts: self.add_trusted_host(host, suppress_logging=True) - def update_index_urls(self, new_index_urls): - # type: (List[str]) -> None + def update_index_urls(self, new_index_urls: List[str]) -> None: """ :param new_index_urls: New index urls to update the authentication handler with. """ self.auth.index_urls = new_index_urls - def add_trusted_host(self, host, source=None, suppress_logging=False): - # type: (str, Optional[str], bool) -> None + def add_trusted_host( + self, host: str, source: Optional[str] = None, suppress_logging: bool = False + ) -> None: """ :param host: It is okay to provide a host that has previously been added. @@ -348,45 +414,48 @@ class PipSession(requests.Session): string came from. """ if not suppress_logging: - msg = f'adding trusted host: {host!r}' + msg = f"adding trusted host: {host!r}" if source is not None: - msg += f' (from {source})' + msg += f" (from {source})" logger.info(msg) - host_port = parse_netloc(host) - if host_port not in self.pip_trusted_origins: - self.pip_trusted_origins.append(host_port) + parsed_host, parsed_port = parse_netloc(host) + if parsed_host is None: + raise ValueError(f"Trusted host URL must include a host part: {host!r}") + if (parsed_host, parsed_port) not in self.pip_trusted_origins: + self.pip_trusted_origins.append((parsed_host, parsed_port)) self.mount( - build_url_from_netloc(host) + '/', - self._trusted_host_adapter + build_url_from_netloc(host, scheme="http") + "/", self._trusted_host_adapter ) - if not host_port[1]: - # Mount wildcard ports for the same host. 
+ self.mount(build_url_from_netloc(host) + "/", self._trusted_host_adapter) + if not parsed_port: self.mount( - build_url_from_netloc(host) + ':', - self._trusted_host_adapter + build_url_from_netloc(host, scheme="http") + ":", + self._trusted_host_adapter, ) + # Mount wildcard ports for the same host. + self.mount(build_url_from_netloc(host) + ":", self._trusted_host_adapter) - def iter_secure_origins(self): - # type: () -> Iterator[SecureOrigin] + def iter_secure_origins(self) -> Generator[SecureOrigin, None, None]: yield from SECURE_ORIGINS for host, port in self.pip_trusted_origins: - yield ('*', host, '*' if port is None else port) + yield ("*", host, "*" if port is None else port) - def is_secure_origin(self, location): - # type: (Link) -> bool + def is_secure_origin(self, location: Link) -> bool: # Determine if this url used a secure transport mechanism parsed = urllib.parse.urlparse(str(location)) origin_protocol, origin_host, origin_port = ( - parsed.scheme, parsed.hostname, parsed.port, + parsed.scheme, + parsed.hostname, + parsed.port, ) # The protocol to use to see if the protocol matches. # Don't count the repository type as part of the protocol: in # cases such as "git+ssh", only use "ssh". (I.e., Only verify against # the last scheme.) - origin_protocol = origin_protocol.rsplit('+', 1)[-1] + origin_protocol = origin_protocol.rsplit("+", 1)[-1] # Determine if our origin is a secure origin by looking through our # hardcoded list of secure origins, as well as any additional ones @@ -397,15 +466,15 @@ class PipSession(requests.Session): continue try: - addr = ipaddress.ip_address(origin_host) + addr = ipaddress.ip_address(origin_host or "") network = ipaddress.ip_network(secure_host) except ValueError: # We don't have both a valid address or a valid network, so # we'll check this origin against hostnames. if ( - origin_host and - origin_host.lower() != secure_host.lower() and - secure_host != "*" + origin_host + and origin_host.lower() != secure_host.lower() + and secure_host != "*" ): continue else: @@ -416,9 +485,9 @@ class PipSession(requests.Session): # Check to see if the port matches. 
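The secure-origin walk that follows relies on treating a trusted host as either an IP network, a literal hostname, or the wildcard "*" (after first stripping any VCS prefix such as "git+ssh" down to "ssh"). A hedged, self-contained illustration of the host portion of that check; matches_trusted_host is a made-up name, not pip's API:

import ipaddress
import urllib.parse

def matches_trusted_host(url: str, secure_host: str) -> bool:
    host = urllib.parse.urlparse(url).hostname or ""
    try:
        # Address-vs-network comparison, e.g. 10.0.0.5 against 10.0.0.0/8.
        return ipaddress.ip_address(host) in ipaddress.ip_network(secure_host)
    except ValueError:
        # Not a valid address/network pair; fall back to hostname comparison.
        return secure_host == "*" or host.lower() == secure_host.lower()

print(matches_trusted_host("https://10.0.0.5/simple", "10.0.0.0/8"))  # True
print(matches_trusted_host("https://files.pythonhosted.org/x", "*"))  # True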
if ( - origin_port != secure_port and - secure_port != "*" and - secure_port is not None + origin_port != secure_port + and secure_port != "*" + and secure_port is not None ): continue @@ -440,10 +509,11 @@ class PipSession(requests.Session): return False - def request(self, method, url, *args, **kwargs): - # type: (str, str, *Any, **Any) -> Response + def request(self, method: str, url: str, *args: Any, **kwargs: Any) -> Response: # Allow setting a default timeout on a session kwargs.setdefault("timeout", self.timeout) + # Allow setting a default proxies on a session + kwargs.setdefault("proxies", self.proxies) # Dispatch the actual request return super().request(method, url, *args, **kwargs) diff --git a/env/Lib/site-packages/pip/_internal/network/utils.py b/env/Lib/site-packages/pip/_internal/network/utils.py index 6e5cf0d1d1a71ec90a7e167331b973dc87381000..134848ae526e54e2b18738f83088c4a17efcce96 100644 --- a/env/Lib/site-packages/pip/_internal/network/utils.py +++ b/env/Lib/site-packages/pip/_internal/network/utils.py @@ -1,4 +1,4 @@ -from typing import Dict, Iterator +from typing import Dict, Generator from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response @@ -23,40 +23,41 @@ from pip._internal.exceptions import NetworkConnectionError # you're not asking for a compressed file and will then decompress it # before sending because if that's the case I don't think it'll ever be # possible to make this work. -HEADERS = {'Accept-Encoding': 'identity'} # type: Dict[str, str] +HEADERS: Dict[str, str] = {"Accept-Encoding": "identity"} -def raise_for_status(resp): - # type: (Response) -> None - http_error_msg = '' +def raise_for_status(resp: Response) -> None: + http_error_msg = "" if isinstance(resp.reason, bytes): # We attempt to decode utf-8 first because some servers # choose to localize their reason strings. If the string # isn't utf-8, we fall back to iso-8859-1 for all other # encodings. try: - reason = resp.reason.decode('utf-8') + reason = resp.reason.decode("utf-8") except UnicodeDecodeError: - reason = resp.reason.decode('iso-8859-1') + reason = resp.reason.decode("iso-8859-1") else: reason = resp.reason if 400 <= resp.status_code < 500: http_error_msg = ( - f'{resp.status_code} Client Error: {reason} for url: {resp.url}') + f"{resp.status_code} Client Error: {reason} for url: {resp.url}" + ) elif 500 <= resp.status_code < 600: http_error_msg = ( - f'{resp.status_code} Server Error: {reason} for url: {resp.url}') + f"{resp.status_code} Server Error: {reason} for url: {resp.url}" + ) if http_error_msg: raise NetworkConnectionError(http_error_msg, response=resp) -def response_chunks(response, chunk_size=CONTENT_CHUNK_SIZE): - # type: (Response, int) -> Iterator[bytes] - """Given a requests Response, provide the data chunks. - """ +def response_chunks( + response: Response, chunk_size: int = CONTENT_CHUNK_SIZE +) -> Generator[bytes, None, None]: + """Given a requests Response, provide the data chunks.""" try: # Special case for urllib3. for chunk in response.raw.stream( diff --git a/env/Lib/site-packages/pip/_internal/network/xmlrpc.py b/env/Lib/site-packages/pip/_internal/network/xmlrpc.py index b92b8d9ae1842bec7d42599218031dfc9def62ab..4a7d55d0e50cb8b892caa021695522e5ddd54a17 100644 --- a/env/Lib/site-packages/pip/_internal/network/xmlrpc.py +++ b/env/Lib/site-packages/pip/_internal/network/xmlrpc.py @@ -21,22 +21,32 @@ class PipXmlrpcTransport(xmlrpc.client.Transport): object. 
""" - def __init__(self, index_url, session, use_datetime=False): - # type: (str, PipSession, bool) -> None + def __init__( + self, index_url: str, session: PipSession, use_datetime: bool = False + ) -> None: super().__init__(use_datetime) index_parts = urllib.parse.urlparse(index_url) self._scheme = index_parts.scheme self._session = session - def request(self, host, handler, request_body, verbose=False): - # type: (_HostType, str, bytes, bool) -> Tuple[_Marshallable, ...] + def request( + self, + host: "_HostType", + handler: str, + request_body: bytes, + verbose: bool = False, + ) -> Tuple["_Marshallable", ...]: assert isinstance(host, str) parts = (self._scheme, host, handler, None, None, None) url = urllib.parse.urlunparse(parts) try: - headers = {'Content-Type': 'text/xml'} - response = self._session.post(url, data=request_body, - headers=headers, stream=True) + headers = {"Content-Type": "text/xml"} + response = self._session.post( + url, + data=request_body, + headers=headers, + stream=True, + ) raise_for_status(response) self.verbose = verbose return self.parse_response(response.raw) @@ -44,6 +54,7 @@ class PipXmlrpcTransport(xmlrpc.client.Transport): assert exc.response logger.critical( "HTTP error %s while getting %s", - exc.response.status_code, url, + exc.response.status_code, + url, ) raise diff --git a/env/Lib/site-packages/pip/_internal/operations/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/operations/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 9b3650d70ccb7cd60366b139be73e1c1fd16aa2c..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/operations/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/operations/__pycache__/check.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/operations/__pycache__/check.cpython-39.pyc deleted file mode 100644 index c9bff16da90f9ed06485345cadb99f8bcf3e37ee..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/operations/__pycache__/check.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/operations/__pycache__/freeze.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/operations/__pycache__/freeze.cpython-39.pyc deleted file mode 100644 index a7c7c055be58a6aa126398cec987675c806e7270..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/operations/__pycache__/freeze.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/operations/__pycache__/prepare.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/operations/__pycache__/prepare.cpython-39.pyc deleted file mode 100644 index 98a20e7d0c02fcf50ad8cf66ec00f1f39afe7dad..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/operations/__pycache__/prepare.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/operations/build/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/operations/build/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 35a76848c3e474c4d27d88a3291cfd8b52114671..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/operations/build/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/operations/build/__pycache__/metadata.cpython-39.pyc 
b/env/Lib/site-packages/pip/_internal/operations/build/__pycache__/metadata.cpython-39.pyc deleted file mode 100644 index 5e73fc27fc6968fb21520c744d820ce87a4dcd45..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/operations/build/__pycache__/metadata.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/operations/build/__pycache__/metadata_legacy.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/operations/build/__pycache__/metadata_legacy.cpython-39.pyc deleted file mode 100644 index 55bb6d5b7d49abf688c5744d72ce164be203ece5..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/operations/build/__pycache__/metadata_legacy.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/operations/build/__pycache__/wheel.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/operations/build/__pycache__/wheel.cpython-39.pyc deleted file mode 100644 index 2fd533621cbb12b4c57302b3581c1e95a5beba31..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/operations/build/__pycache__/wheel.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/operations/build/__pycache__/wheel_legacy.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/operations/build/__pycache__/wheel_legacy.cpython-39.pyc deleted file mode 100644 index 8cbc897aa17d9f341649b692ddbc97733a75da83..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/operations/build/__pycache__/wheel_legacy.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/operations/build/metadata.py b/env/Lib/site-packages/pip/_internal/operations/build/metadata.py index 1c826835b495b15c44810f1f6b8ff4d3b2833f74..c66ac354deb035405fe0e4040dac539d28570257 100644 --- a/env/Lib/site-packages/pip/_internal/operations/build/metadata.py +++ b/env/Lib/site-packages/pip/_internal/operations/build/metadata.py @@ -3,33 +3,37 @@ import os -from pip._vendor.pep517.wrappers import Pep517HookCaller +from pip._vendor.pyproject_hooks import BuildBackendHookCaller from pip._internal.build_env import BuildEnvironment +from pip._internal.exceptions import ( + InstallationSubprocessError, + MetadataGenerationFailed, +) from pip._internal.utils.subprocess import runner_with_spinner_message from pip._internal.utils.temp_dir import TempDirectory -def generate_metadata(build_env, backend): - # type: (BuildEnvironment, Pep517HookCaller) -> str +def generate_metadata( + build_env: BuildEnvironment, backend: BuildBackendHookCaller, details: str +) -> str: """Generate metadata using mechanisms described in PEP 517. Returns the generated metadata directory. """ - metadata_tmpdir = TempDirectory( - kind="modern-metadata", globally_managed=True - ) + metadata_tmpdir = TempDirectory(kind="modern-metadata", globally_managed=True) metadata_dir = metadata_tmpdir.path with build_env: - # Note that Pep517HookCaller implements a fallback for + # Note that BuildBackendHookCaller implements a fallback for # prepare_metadata_for_build_wheel, so we don't have to # consider the possibility that this hook doesn't exist. 
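Since the surrounding hunk renames Pep517HookCaller to BuildBackendHookCaller, it may help to see the underlying PEP 517 hook without pip's subprocess machinery. A sketch calling setuptools' in-process backend directly, assuming setuptools is installed and the current directory is a buildable project (pip itself runs the hook in a subprocess instead of importing the backend):

import os
import tempfile
from setuptools import build_meta  # a PEP 517 build backend

metadata_dir = tempfile.mkdtemp(prefix="modern-metadata-")
# The same hook pip invokes above; returns the .dist-info directory name.
distinfo = build_meta.prepare_metadata_for_build_wheel(metadata_dir)
print(os.path.join(metadata_dir, distinfo))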
- runner = runner_with_spinner_message("Preparing wheel metadata") + runner = runner_with_spinner_message("Preparing metadata (pyproject.toml)") with backend.subprocess_runner(runner): - distinfo_dir = backend.prepare_metadata_for_build_wheel( - metadata_dir - ) + try: + distinfo_dir = backend.prepare_metadata_for_build_wheel(metadata_dir) + except InstallationSubprocessError as error: + raise MetadataGenerationFailed(package_details=details) from error return os.path.join(metadata_dir, distinfo_dir) diff --git a/env/Lib/site-packages/pip/_internal/operations/build/metadata_legacy.py b/env/Lib/site-packages/pip/_internal/operations/build/metadata_legacy.py index f46538a07f4914f373bb5f9b1ec36a0f2cc2ec50..e60988d643e007801f79e8718354e7d00c7acf18 100644 --- a/env/Lib/site-packages/pip/_internal/operations/build/metadata_legacy.py +++ b/env/Lib/site-packages/pip/_internal/operations/build/metadata_legacy.py @@ -5,7 +5,12 @@ import logging import os from pip._internal.build_env import BuildEnvironment -from pip._internal.exceptions import InstallationError +from pip._internal.cli.spinners import open_spinner +from pip._internal.exceptions import ( + InstallationError, + InstallationSubprocessError, + MetadataGenerationFailed, +) from pip._internal.utils.setuptools_build import make_setuptools_egg_info_args from pip._internal.utils.subprocess import call_subprocess from pip._internal.utils.temp_dir import TempDirectory @@ -13,49 +18,39 @@ from pip._internal.utils.temp_dir import TempDirectory logger = logging.getLogger(__name__) -def _find_egg_info(directory): - # type: (str) -> str - """Find an .egg-info subdirectory in `directory`. - """ - filenames = [ - f for f in os.listdir(directory) if f.endswith(".egg-info") - ] +def _find_egg_info(directory: str) -> str: + """Find an .egg-info subdirectory in `directory`.""" + filenames = [f for f in os.listdir(directory) if f.endswith(".egg-info")] if not filenames: - raise InstallationError( - f"No .egg-info directory found in {directory}" - ) + raise InstallationError(f"No .egg-info directory found in {directory}") if len(filenames) > 1: raise InstallationError( - "More than one .egg-info directory found in {}".format( - directory - ) + "More than one .egg-info directory found in {}".format(directory) ) return os.path.join(directory, filenames[0]) def generate_metadata( - build_env, # type: BuildEnvironment - setup_py_path, # type: str - source_dir, # type: str - isolated, # type: bool - details, # type: str -): - # type: (...) -> str + build_env: BuildEnvironment, + setup_py_path: str, + source_dir: str, + isolated: bool, + details: str, +) -> str: """Generate metadata using setup.py-based defacto mechanisms. Returns the generated metadata directory. 
""" logger.debug( - 'Running setup.py (path:%s) egg_info for package %s', - setup_py_path, details, + "Running setup.py (path:%s) egg_info for package %s", + setup_py_path, + details, ) - egg_info_dir = TempDirectory( - kind="pip-egg-info", globally_managed=True - ).path + egg_info_dir = TempDirectory(kind="pip-egg-info", globally_managed=True).path args = make_setuptools_egg_info_args( setup_py_path, @@ -64,11 +59,16 @@ def generate_metadata( ) with build_env: - call_subprocess( - args, - cwd=source_dir, - command_desc='python setup.py egg_info', - ) + with open_spinner("Preparing metadata (setup.py)") as spinner: + try: + call_subprocess( + args, + cwd=source_dir, + command_desc="python setup.py egg_info", + spinner=spinner, + ) + except InstallationSubprocessError as error: + raise MetadataGenerationFailed(package_details=details) from error # Return the .egg-info directory. return _find_egg_info(egg_info_dir) diff --git a/env/Lib/site-packages/pip/_internal/operations/build/wheel.py b/env/Lib/site-packages/pip/_internal/operations/build/wheel.py index 903bd7a0567b50f15aa518bf5ad310b13608ac2c..064811ad11bb07b2b7bc8e30ec6c03f21997d6b2 100644 --- a/env/Lib/site-packages/pip/_internal/operations/build/wheel.py +++ b/env/Lib/site-packages/pip/_internal/operations/build/wheel.py @@ -2,7 +2,7 @@ import logging import os from typing import Optional -from pip._vendor.pep517.wrappers import Pep517HookCaller +from pip._vendor.pyproject_hooks import BuildBackendHookCaller from pip._internal.utils.subprocess import runner_with_spinner_message @@ -10,22 +10,21 @@ logger = logging.getLogger(__name__) def build_wheel_pep517( - name, # type: str - backend, # type: Pep517HookCaller - metadata_directory, # type: str - tempd, # type: str -): - # type: (...) -> Optional[str] + name: str, + backend: BuildBackendHookCaller, + metadata_directory: str, + tempd: str, +) -> Optional[str]: """Build one InstallRequirement using the PEP 517 build process. Returns path to wheel if successfully built. Otherwise, returns None. 
""" assert metadata_directory is not None try: - logger.debug('Destination directory: %s', tempd) + logger.debug("Destination directory: %s", tempd) runner = runner_with_spinner_message( - f'Building wheel for {name} (PEP 517)' + f"Building wheel for {name} (pyproject.toml)" ) with backend.subprocess_runner(runner): wheel_name = backend.build_wheel( @@ -33,6 +32,6 @@ def build_wheel_pep517( metadata_directory=metadata_directory, ) except Exception: - logger.error('Failed building wheel for %s', name) + logger.error("Failed building wheel for %s", name) return None return os.path.join(tempd, wheel_name) diff --git a/env/Lib/site-packages/pip/_internal/operations/build/wheel_legacy.py b/env/Lib/site-packages/pip/_internal/operations/build/wheel_legacy.py index 755c3bc83a2df500cb9cef69cbeb6f4b7dc0d8a0..c5f0492ccbe9c727c835c12c84a1d8340366fa1e 100644 --- a/env/Lib/site-packages/pip/_internal/operations/build/wheel_legacy.py +++ b/env/Lib/site-packages/pip/_internal/operations/build/wheel_legacy.py @@ -4,59 +4,51 @@ from typing import List, Optional from pip._internal.cli.spinners import open_spinner from pip._internal.utils.setuptools_build import make_setuptools_bdist_wheel_args -from pip._internal.utils.subprocess import ( - LOG_DIVIDER, - call_subprocess, - format_command_args, -) +from pip._internal.utils.subprocess import call_subprocess, format_command_args logger = logging.getLogger(__name__) def format_command_result( - command_args, # type: List[str] - command_output, # type: str -): - # type: (...) -> str + command_args: List[str], + command_output: str, +) -> str: """Format command information for logging.""" command_desc = format_command_args(command_args) - text = f'Command arguments: {command_desc}\n' + text = f"Command arguments: {command_desc}\n" if not command_output: - text += 'Command output: None' + text += "Command output: None" elif logger.getEffectiveLevel() > logging.DEBUG: - text += 'Command output: [use --verbose to show]' + text += "Command output: [use --verbose to show]" else: - if not command_output.endswith('\n'): - command_output += '\n' - text += f'Command output:\n{command_output}{LOG_DIVIDER}' + if not command_output.endswith("\n"): + command_output += "\n" + text += f"Command output:\n{command_output}" return text def get_legacy_build_wheel_path( - names, # type: List[str] - temp_dir, # type: str - name, # type: str - command_args, # type: List[str] - command_output, # type: str -): - # type: (...) -> Optional[str] + names: List[str], + temp_dir: str, + name: str, + command_args: List[str], + command_output: str, +) -> Optional[str]: """Return the path to the wheel in the temporary build directory.""" # Sort for determinism. 
names = sorted(names) if not names: - msg = ( - 'Legacy build of wheel for {!r} created no files.\n' - ).format(name) + msg = ("Legacy build of wheel for {!r} created no files.\n").format(name) msg += format_command_result(command_args, command_output) logger.warning(msg) return None if len(names) > 1: msg = ( - 'Legacy build of wheel for {!r} created more than one file.\n' - 'Filenames (choosing first): {}\n' + "Legacy build of wheel for {!r} created more than one file.\n" + "Filenames (choosing first): {}\n" ).format(name, names) msg += format_command_result(command_args, command_output) logger.warning(msg) @@ -65,14 +57,13 @@ def get_legacy_build_wheel_path( def build_wheel_legacy( - name, # type: str - setup_py_path, # type: str - source_dir, # type: str - global_options, # type: List[str] - build_options, # type: List[str] - tempd, # type: str -): - # type: (...) -> Optional[str] + name: str, + setup_py_path: str, + source_dir: str, + global_options: List[str], + build_options: List[str], + tempd: str, +) -> Optional[str]: """Build one unpacked package using the "legacy" build process. Returns path to wheel if successfully built. Otherwise, returns None. @@ -84,19 +75,20 @@ def build_wheel_legacy( destination_dir=tempd, ) - spin_message = f'Building wheel for {name} (setup.py)' + spin_message = f"Building wheel for {name} (setup.py)" with open_spinner(spin_message) as spinner: - logger.debug('Destination directory: %s', tempd) + logger.debug("Destination directory: %s", tempd) try: output = call_subprocess( wheel_args, + command_desc="python setup.py bdist_wheel", cwd=source_dir, spinner=spinner, ) except Exception: spinner.finish("error") - logger.error('Failed building wheel for %s', name) + logger.error("Failed building wheel for %s", name) return None names = os.listdir(tempd) diff --git a/env/Lib/site-packages/pip/_internal/operations/check.py b/env/Lib/site-packages/pip/_internal/operations/check.py index 5699c0b91ee5a98d50095c4993d7609de51fea7a..2610459228fe87836fd9311d45b3f77b676c0f11 100644 --- a/env/Lib/site-packages/pip/_internal/operations/check.py +++ b/env/Lib/site-packages/pip/_internal/operations/check.py @@ -2,87 +2,91 @@ """ import logging -from collections import namedtuple -from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Set, Tuple +from typing import Callable, Dict, List, NamedTuple, Optional, Set, Tuple -from pip._vendor.packaging.utils import canonicalize_name -from pip._vendor.pkg_resources import RequirementParseError +from pip._vendor.packaging.requirements import Requirement +from pip._vendor.packaging.specifiers import LegacySpecifier +from pip._vendor.packaging.utils import NormalizedName, canonicalize_name +from pip._vendor.packaging.version import LegacyVersion from pip._internal.distributions import make_distribution_for_install_requirement +from pip._internal.metadata import get_default_environment +from pip._internal.metadata.base import DistributionVersion from pip._internal.req.req_install import InstallRequirement -from pip._internal.utils.misc import get_installed_distributions - -if TYPE_CHECKING: - from pip._vendor.packaging.utils import NormalizedName +from pip._internal.utils.deprecation import deprecated logger = logging.getLogger(__name__) + +class PackageDetails(NamedTuple): + version: DistributionVersion + dependencies: List[Requirement] + + # Shorthands -PackageSet = Dict['NormalizedName', 'PackageDetails'] -Missing = Tuple[str, Any] -Conflicting = Tuple[str, str, Any] +PackageSet = Dict[NormalizedName, 
PackageDetails] +Missing = Tuple[NormalizedName, Requirement] +Conflicting = Tuple[NormalizedName, DistributionVersion, Requirement] -MissingDict = Dict['NormalizedName', List[Missing]] -ConflictingDict = Dict['NormalizedName', List[Conflicting]] +MissingDict = Dict[NormalizedName, List[Missing]] +ConflictingDict = Dict[NormalizedName, List[Conflicting]] CheckResult = Tuple[MissingDict, ConflictingDict] ConflictDetails = Tuple[PackageSet, CheckResult] -PackageDetails = namedtuple('PackageDetails', ['version', 'requires']) - - -def create_package_set_from_installed(**kwargs: Any) -> Tuple["PackageSet", bool]: - """Converts a list of distributions into a PackageSet. - """ - # Default to using all packages installed on the system - if kwargs == {}: - kwargs = {"local_only": False, "skip": ()} +def create_package_set_from_installed() -> Tuple[PackageSet, bool]: + """Converts a list of distributions into a PackageSet.""" package_set = {} problems = False - for dist in get_installed_distributions(**kwargs): - name = canonicalize_name(dist.project_name) + env = get_default_environment() + for dist in env.iter_installed_distributions(local_only=False, skip=()): + name = dist.canonical_name try: - package_set[name] = PackageDetails(dist.version, dist.requires()) - except (OSError, RequirementParseError) as e: - # Don't crash on unreadable or broken metadata + dependencies = list(dist.iter_dependencies()) + package_set[name] = PackageDetails(dist.version, dependencies) + except (OSError, ValueError) as e: + # Don't crash on unreadable or broken metadata. logger.warning("Error parsing requirements for %s: %s", name, e) problems = True return package_set, problems -def check_package_set(package_set, should_ignore=None): - # type: (PackageSet, Optional[Callable[[str], bool]]) -> CheckResult +def check_package_set( + package_set: PackageSet, should_ignore: Optional[Callable[[str], bool]] = None +) -> CheckResult: """Check if a package set is consistent If should_ignore is passed, it should be a callable that takes a package name and returns a boolean. 
""" + warn_legacy_versions_and_specifiers(package_set) + missing = {} conflicting = {} for package_name, package_detail in package_set.items(): # Info about dependencies of package_name - missing_deps = set() # type: Set[Missing] - conflicting_deps = set() # type: Set[Conflicting] + missing_deps: Set[Missing] = set() + conflicting_deps: Set[Conflicting] = set() if should_ignore and should_ignore(package_name): continue - for req in package_detail.requires: - name = canonicalize_name(req.project_name) + for req in package_detail.dependencies: + name = canonicalize_name(req.name) # Check if it's missing if name not in package_set: missed = True if req.marker is not None: - missed = req.marker.evaluate() + missed = req.marker.evaluate({"extra": ""}) if missed: missing_deps.add((name, req)) continue # Check if there's a conflict - version = package_set[name].version # type: str + version = package_set[name].version if not req.specifier.contains(version, prereleases=True): conflicting_deps.add((name, version, req)) @@ -94,8 +98,7 @@ def check_package_set(package_set, should_ignore=None): return missing, conflicting -def check_install_conflicts(to_install): - # type: (List[InstallRequirement]) -> ConflictDetails +def check_install_conflicts(to_install: List[InstallRequirement]) -> ConflictDetails: """For checking if the dependency graph would be consistent after \ installing given requirements """ @@ -111,43 +114,74 @@ def check_install_conflicts(to_install): package_set, check_package_set( package_set, should_ignore=lambda name: name not in whitelist - ) + ), ) -def _simulate_installation_of(to_install, package_set): - # type: (List[InstallRequirement], PackageSet) -> Set[NormalizedName] - """Computes the version of packages after installing to_install. - """ - +def _simulate_installation_of( + to_install: List[InstallRequirement], package_set: PackageSet +) -> Set[NormalizedName]: + """Computes the version of packages after installing to_install.""" # Keep track of packages that were installed installed = set() # Modify it as installing requirement_set would (assuming no errors) for inst_req in to_install: abstract_dist = make_distribution_for_install_requirement(inst_req) - dist = abstract_dist.get_pkg_resources_distribution() - - assert dist is not None - name = canonicalize_name(dist.key) - package_set[name] = PackageDetails(dist.version, dist.requires()) + dist = abstract_dist.get_metadata_distribution() + name = dist.canonical_name + package_set[name] = PackageDetails(dist.version, list(dist.iter_dependencies())) installed.add(name) return installed -def _create_whitelist(would_be_installed, package_set): - # type: (Set[NormalizedName], PackageSet) -> Set[NormalizedName] +def _create_whitelist( + would_be_installed: Set[NormalizedName], package_set: PackageSet +) -> Set[NormalizedName]: packages_affected = set(would_be_installed) for package_name in package_set: if package_name in packages_affected: continue - for req in package_set[package_name].requires: + for req in package_set[package_name].dependencies: if canonicalize_name(req.name) in packages_affected: packages_affected.add(package_name) break return packages_affected + + +def warn_legacy_versions_and_specifiers(package_set: PackageSet) -> None: + for project_name, package_details in package_set.items(): + if isinstance(package_details.version, LegacyVersion): + deprecated( + reason=( + f"{project_name} {package_details.version} " + f"has a non-standard version number." 
+ ), + replacement=( + f"to upgrade to a newer version of {project_name} " + f"or contact the author to suggest that they " + f"release a version with a conforming version number" + ), + issue=12063, + gone_in="23.3", + ) + for dep in package_details.dependencies: + if any(isinstance(spec, LegacySpecifier) for spec in dep.specifier): + deprecated( + reason=( + f"{project_name} {package_details.version} " + f"has a non-standard dependency specifier {dep}." + ), + replacement=( + f"to upgrade to a newer version of {project_name} " + f"or contact the author to suggest that they " + f"release a version with a conforming dependency specifiers" + ), + issue=12063, + gone_in="23.3", + ) diff --git a/env/Lib/site-packages/pip/_internal/operations/freeze.py b/env/Lib/site-packages/pip/_internal/operations/freeze.py index f34a9d4be7ecadc3a01f8a91f41b6ddcadd6e69e..354456845141eba23dce26482aa6d4196f4804de 100644 --- a/env/Lib/site-packages/pip/_internal/operations/freeze.py +++ b/env/Lib/site-packages/pip/_internal/operations/freeze.py @@ -1,73 +1,46 @@ import collections import logging import os -from typing import ( - Container, - Dict, - Iterable, - Iterator, - List, - Optional, - Set, - Tuple, - Union, -) +from typing import Container, Dict, Generator, Iterable, List, NamedTuple, Optional, Set from pip._vendor.packaging.utils import canonicalize_name -from pip._vendor.pkg_resources import Distribution, Requirement, RequirementParseError +from pip._vendor.packaging.version import Version from pip._internal.exceptions import BadCommand, InstallationError +from pip._internal.metadata import BaseDistribution, get_environment from pip._internal.req.constructors import ( install_req_from_editable, install_req_from_line, ) from pip._internal.req.req_file import COMMENT_RE -from pip._internal.utils.direct_url_helpers import ( - direct_url_as_pep440_direct_reference, - dist_get_direct_url, -) -from pip._internal.utils.misc import dist_is_editable, get_installed_distributions +from pip._internal.utils.direct_url_helpers import direct_url_as_pep440_direct_reference logger = logging.getLogger(__name__) -RequirementInfo = Tuple[Optional[Union[str, Requirement]], bool, List[str]] - -def freeze( - requirement=None, # type: Optional[List[str]] - find_links=None, # type: Optional[List[str]] - local_only=False, # type: bool - user_only=False, # type: bool - paths=None, # type: Optional[List[str]] - isolated=False, # type: bool - exclude_editable=False, # type: bool - skip=() # type: Container[str] -): - # type: (...) -> Iterator[str] - find_links = find_links or [] +class _EditableInfo(NamedTuple): + requirement: str + comments: List[str] - for link in find_links: - yield f'-f {link}' - installations = {} # type: Dict[str, FrozenRequirement] - for dist in get_installed_distributions( - local_only=local_only, - skip=(), - user_only=user_only, - paths=paths - ): - try: - req = FrozenRequirement.from_dist(dist) - except RequirementParseError as exc: - # We include dist rather than dist.project_name because the - # dist string includes more information, like the version and - # location. We also include the exception message to aid - # troubleshooting. 
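What warn_legacy_versions_and_specifiers above flags can be reproduced with the standalone packaging library, where non-PEP 440 versions no longer even parse (the copy vendored here still models them as LegacyVersion). A small probe, assuming packaging >= 22:

from packaging.version import InvalidVersion, Version

for candidate in ["1.0.2", "2004d", "0.9-SNAPSHOT"]:
    try:
        Version(candidate)
        print(candidate, "-> PEP 440 compliant")
    except InvalidVersion:
        print(candidate, "-> non-standard; the deprecation above targets these")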
- logger.warning( - 'Could not generate requirement for distribution %r: %s', - dist, exc - ) - continue +def freeze( + requirement: Optional[List[str]] = None, + local_only: bool = False, + user_only: bool = False, + paths: Optional[List[str]] = None, + isolated: bool = False, + exclude_editable: bool = False, + skip: Container[str] = (), +) -> Generator[str, None, None]: + installations: Dict[str, FrozenRequirement] = {} + + dists = get_environment(paths).iter_installed_distributions( + local_only=local_only, + skip=(), + user_only=user_only, + ) + for dist in dists: + req = FrozenRequirement.from_dist(dist) if exclude_editable and req.editable: continue installations[req.canonical_name] = req @@ -77,42 +50,50 @@ def freeze( # should only be emitted once, even if the same option is in multiple # requirements files, so we need to keep track of what has been emitted # so that we don't emit it again if it's seen again - emitted_options = set() # type: Set[str] + emitted_options: Set[str] = set() # keep track of which files a requirement is in so that we can # give an accurate warning if a requirement appears multiple times. - req_files = collections.defaultdict(list) # type: Dict[str, List[str]] + req_files: Dict[str, List[str]] = collections.defaultdict(list) for req_file_path in requirement: with open(req_file_path) as req_file: for line in req_file: - if (not line.strip() or - line.strip().startswith('#') or - line.startswith(( - '-r', '--requirement', - '-f', '--find-links', - '-i', '--index-url', - '--pre', - '--trusted-host', - '--process-dependency-links', - '--extra-index-url', - '--use-feature'))): + if ( + not line.strip() + or line.strip().startswith("#") + or line.startswith( + ( + "-r", + "--requirement", + "-f", + "--find-links", + "-i", + "--index-url", + "--pre", + "--trusted-host", + "--process-dependency-links", + "--extra-index-url", + "--use-feature", + ) + ) + ): line = line.rstrip() if line not in emitted_options: emitted_options.add(line) yield line continue - if line.startswith('-e') or line.startswith('--editable'): - if line.startswith('-e'): + if line.startswith("-e") or line.startswith("--editable"): + if line.startswith("-e"): line = line[2:].strip() else: - line = line[len('--editable'):].strip().lstrip('=') + line = line[len("--editable") :].strip().lstrip("=") line_req = install_req_from_editable( line, isolated=isolated, ) else: line_req = install_req_from_line( - COMMENT_RE.sub('', line).strip(), + COMMENT_RE.sub("", line).strip(), isolated=isolated, ) @@ -120,15 +101,15 @@ def freeze( logger.info( "Skipping line in requirement file [%s] because " "it's not clear what it would install: %s", - req_file_path, line.strip(), + req_file_path, + line.strip(), ) logger.info( " (add #egg=PackageName to the URL to avoid" " this warning)" ) else: - line_req_canonical_name = canonicalize_name( - line_req.name) + line_req_canonical_name = canonicalize_name(line_req.name) if line_req_canonical_name not in installations: # either it's not installed, or it is installed # but has been processed already @@ -137,14 +118,13 @@ def freeze( "Requirement file [%s] contains %s, but " "package %r is not installed", req_file_path, - COMMENT_RE.sub('', line).strip(), - line_req.name + COMMENT_RE.sub("", line).strip(), + line_req.name, ) else: req_files[line_req.name].append(req_file_path) else: - yield str(installations[ - line_req_canonical_name]).rstrip() + yield str(installations[line_req_canonical_name]).rstrip() del installations[line_req_canonical_name] 
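The bookkeeping above keys installations on canonicalized names, which is what lets a requirements-file line match the installed distribution regardless of spelling. For illustration, using the standalone packaging library:

from packaging.utils import canonicalize_name

# All of these normalize to the key form used for the lookups above.
print(canonicalize_name("Django"))             # django
print(canonicalize_name("typing_extensions"))  # typing-extensions
print(canonicalize_name("Zope.Interface"))     # zope-interface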
req_files[line_req.name].append(req_file_path) @@ -152,83 +132,99 @@ def freeze( # single requirements file or in different requirements files). for name, files in req_files.items(): if len(files) > 1: - logger.warning("Requirement %s included multiple times [%s]", - name, ', '.join(sorted(set(files)))) + logger.warning( + "Requirement %s included multiple times [%s]", + name, + ", ".join(sorted(set(files))), + ) - yield( - '## The following requirements were added by ' - 'pip freeze:' - ) - for installation in sorted( - installations.values(), key=lambda x: x.name.lower()): + yield ("## The following requirements were added by pip freeze:") + for installation in sorted(installations.values(), key=lambda x: x.name.lower()): if installation.canonical_name not in skip: yield str(installation).rstrip() -def get_requirement_info(dist): - # type: (Distribution) -> RequirementInfo +def _format_as_name_version(dist: BaseDistribution) -> str: + dist_version = dist.version + if isinstance(dist_version, Version): + return f"{dist.raw_name}=={dist_version}" + return f"{dist.raw_name}==={dist_version}" + + +def _get_editable_info(dist: BaseDistribution) -> _EditableInfo: """ - Compute and return values (req, editable, comments) for use in + Compute and return values (req, comments) for use in FrozenRequirement.from_dist(). """ - if not dist_is_editable(dist): - return (None, False, []) + editable_project_location = dist.editable_project_location + assert editable_project_location + location = os.path.normcase(os.path.abspath(editable_project_location)) - location = os.path.normcase(os.path.abspath(dist.location)) + from pip._internal.vcs import RemoteNotFoundError, RemoteNotValidError, vcs - from pip._internal.vcs import RemoteNotFoundError, vcs vcs_backend = vcs.get_backend_for_dir(location) if vcs_backend is None: - req = dist.as_requirement() + display = _format_as_name_version(dist) logger.debug( - 'No VCS found for editable requirement "%s" in: %r', req, + 'No VCS found for editable requirement "%s" in: %r', + display, location, ) - comments = [ - f'# Editable install with no version control ({req})' - ] - return (location, True, comments) + return _EditableInfo( + requirement=location, + comments=[f"# Editable install with no version control ({display})"], + ) + + vcs_name = type(vcs_backend).__name__ try: - req = vcs_backend.get_src_requirement(location, dist.project_name) + req = vcs_backend.get_src_requirement(location, dist.raw_name) except RemoteNotFoundError: - req = dist.as_requirement() - comments = [ - '# Editable {} install with no remote ({})'.format( - type(vcs_backend).__name__, req, - ) - ] - return (location, True, comments) - + display = _format_as_name_version(dist) + return _EditableInfo( + requirement=location, + comments=[f"# Editable {vcs_name} install with no remote ({display})"], + ) + except RemoteNotValidError as ex: + display = _format_as_name_version(dist) + return _EditableInfo( + requirement=location, + comments=[ + f"# Editable {vcs_name} install ({display}) with either a deleted " + f"local remote or invalid URI:", + f"# '{ex.url}'", + ], + ) except BadCommand: logger.warning( - 'cannot determine version of editable source in %s ' - '(%s command not found in path)', + "cannot determine version of editable source in %s " + "(%s command not found in path)", location, vcs_backend.name, ) - return (None, True, []) - + return _EditableInfo(requirement=location, comments=[]) except InstallationError as exc: - logger.warning( - "Error when trying to get requirement for 
VCS system %s, " - "falling back to uneditable format", exc - ) + logger.warning("Error when trying to get requirement for VCS system %s", exc) else: - return (req, True, []) + return _EditableInfo(requirement=req, comments=[]) - logger.warning( - 'Could not determine repository location of %s', location - ) - comments = ['## !! Could not determine repository location'] + logger.warning("Could not determine repository location of %s", location) - return (None, False, comments) + return _EditableInfo( + requirement=location, + comments=["## !! Could not determine repository location"], + ) class FrozenRequirement: - def __init__(self, name, req, editable, comments=()): - # type: (str, Union[str, Requirement], bool, Iterable[str]) -> None + def __init__( + self, + name: str, + req: str, + editable: bool, + comments: Iterable[str] = (), + ) -> None: self.name = name self.canonical_name = canonicalize_name(name) self.req = req @@ -236,29 +232,24 @@ class FrozenRequirement: self.comments = comments @classmethod - def from_dist(cls, dist): - # type: (Distribution) -> FrozenRequirement - # TODO `get_requirement_info` is taking care of editable requirements. - # TODO This should be refactored when we will add detection of - # editable that provide .dist-info metadata. - req, editable, comments = get_requirement_info(dist) - if req is None and not editable: - # if PEP 610 metadata is present, attempt to use it - direct_url = dist_get_direct_url(dist) + def from_dist(cls, dist: BaseDistribution) -> "FrozenRequirement": + editable = dist.editable + if editable: + req, comments = _get_editable_info(dist) + else: + comments = [] + direct_url = dist.direct_url if direct_url: - req = direct_url_as_pep440_direct_reference( - direct_url, dist.project_name - ) - comments = [] - if req is None: - # name==version requirement - req = dist.as_requirement() + # if PEP 610 metadata is present, use it + req = direct_url_as_pep440_direct_reference(direct_url, dist.raw_name) + else: + # name==version requirement + req = _format_as_name_version(dist) - return cls(dist.project_name, req, editable, comments=comments) + return cls(dist.raw_name, req, editable, comments=comments) - def __str__(self): - # type: () -> str + def __str__(self) -> str: req = self.req if self.editable: - req = f'-e {req}' - return '\n'.join(list(self.comments) + [str(req)]) + '\n' + req = f"-e {req}" + return "\n".join(list(self.comments) + [str(req)]) + "\n" diff --git a/env/Lib/site-packages/pip/_internal/operations/install/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/operations/install/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 9a65ac0c0f5bbfdfe368092284022fe9c568d9b6..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/operations/install/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/operations/install/__pycache__/editable_legacy.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/operations/install/__pycache__/editable_legacy.cpython-39.pyc deleted file mode 100644 index cee2f90ad4a6ea80e854079da5d3fd9c0f8cdbf9..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/operations/install/__pycache__/editable_legacy.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/operations/install/__pycache__/legacy.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/operations/install/__pycache__/legacy.cpython-39.pyc deleted file 
mode 100644 index 169e89e976b5a41b8890758f11cda62494d5cc2f..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/operations/install/__pycache__/legacy.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/operations/install/__pycache__/wheel.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/operations/install/__pycache__/wheel.cpython-39.pyc deleted file mode 100644 index 7e73e396d930cefd1f44f6945a056b33128e6174..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/operations/install/__pycache__/wheel.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/operations/install/editable_legacy.py b/env/Lib/site-packages/pip/_internal/operations/install/editable_legacy.py index 6882c475cacf077cf2dbb9e08a21efdab524ad16..bebe24e6d3ac321523e0442d28b77b6e6df85970 100644 --- a/env/Lib/site-packages/pip/_internal/operations/install/editable_legacy.py +++ b/env/Lib/site-packages/pip/_internal/operations/install/editable_legacy.py @@ -1,7 +1,7 @@ """Legacy editable installation process, i.e. `setup.py develop`. """ import logging -from typing import List, Optional, Sequence +from typing import Optional, Sequence from pip._internal.build_env import BuildEnvironment from pip._internal.utils.logging import indent_log @@ -12,27 +12,25 @@ logger = logging.getLogger(__name__) def install_editable( - install_options, # type: List[str] - global_options, # type: Sequence[str] - prefix, # type: Optional[str] - home, # type: Optional[str] - use_user_site, # type: bool - name, # type: str - setup_py_path, # type: str - isolated, # type: bool - build_env, # type: BuildEnvironment - unpacked_source_directory, # type: str -): - # type: (...) -> None + *, + global_options: Sequence[str], + prefix: Optional[str], + home: Optional[str], + use_user_site: bool, + name: str, + setup_py_path: str, + isolated: bool, + build_env: BuildEnvironment, + unpacked_source_directory: str, +) -> None: """Install a package in editable mode. Most arguments are pass-through to setuptools. """ - logger.info('Running setup.py develop for %s', name) + logger.info("Running setup.py develop for %s", name) args = make_setuptools_develop_args( setup_py_path, global_options=global_options, - install_options=install_options, no_user_config=isolated, prefix=prefix, home=home, @@ -43,5 +41,6 @@ def install_editable( with build_env: call_subprocess( args, + command_desc="python setup.py develop", cwd=unpacked_source_directory, ) diff --git a/env/Lib/site-packages/pip/_internal/operations/install/legacy.py b/env/Lib/site-packages/pip/_internal/operations/install/legacy.py deleted file mode 100644 index 41d0c1f9d0e68d5447d5af9765387438d97bf743..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_internal/operations/install/legacy.py +++ /dev/null @@ -1,125 +0,0 @@ -"""Legacy installation process, i.e. `setup.py install`. 
-""" - -import logging -import os -import sys -from distutils.util import change_root -from typing import List, Optional, Sequence - -from pip._internal.build_env import BuildEnvironment -from pip._internal.exceptions import InstallationError -from pip._internal.models.scheme import Scheme -from pip._internal.utils.logging import indent_log -from pip._internal.utils.misc import ensure_dir -from pip._internal.utils.setuptools_build import make_setuptools_install_args -from pip._internal.utils.subprocess import runner_with_spinner_message -from pip._internal.utils.temp_dir import TempDirectory - -logger = logging.getLogger(__name__) - - -class LegacyInstallFailure(Exception): - def __init__(self): - # type: () -> None - self.parent = sys.exc_info() - - -def install( - install_options, # type: List[str] - global_options, # type: Sequence[str] - root, # type: Optional[str] - home, # type: Optional[str] - prefix, # type: Optional[str] - use_user_site, # type: bool - pycompile, # type: bool - scheme, # type: Scheme - setup_py_path, # type: str - isolated, # type: bool - req_name, # type: str - build_env, # type: BuildEnvironment - unpacked_source_directory, # type: str - req_description, # type: str -): - # type: (...) -> bool - - header_dir = scheme.headers - - with TempDirectory(kind="record") as temp_dir: - try: - record_filename = os.path.join(temp_dir.path, 'install-record.txt') - install_args = make_setuptools_install_args( - setup_py_path, - global_options=global_options, - install_options=install_options, - record_filename=record_filename, - root=root, - prefix=prefix, - header_dir=header_dir, - home=home, - use_user_site=use_user_site, - no_user_config=isolated, - pycompile=pycompile, - ) - - runner = runner_with_spinner_message( - f"Running setup.py install for {req_name}" - ) - with indent_log(), build_env: - runner( - cmd=install_args, - cwd=unpacked_source_directory, - ) - - if not os.path.exists(record_filename): - logger.debug('Record file %s not found', record_filename) - # Signal to the caller that we didn't install the new package - return False - - except Exception: - # Signal to the caller that we didn't install the new package - raise LegacyInstallFailure - - # At this point, we have successfully installed the requirement. - - # We intentionally do not use any encoding to read the file because - # setuptools writes the file using distutils.file_util.write_file, - # which does not specify an encoding. - with open(record_filename) as f: - record_lines = f.read().splitlines() - - def prepend_root(path): - # type: (str) -> str - if root is None or not os.path.isabs(path): - return path - else: - return change_root(root, path) - - for line in record_lines: - directory = os.path.dirname(line) - if directory.endswith('.egg-info'): - egg_info_dir = prepend_root(directory) - break - else: - message = ( - "{} did not indicate that it installed an " - ".egg-info directory. Only setup.py projects " - "generating .egg-info directories are supported." 
- ).format(req_description) - raise InstallationError(message) - - new_lines = [] - for line in record_lines: - filename = line.strip() - if os.path.isdir(filename): - filename += os.path.sep - new_lines.append( - os.path.relpath(prepend_root(filename), egg_info_dir) - ) - new_lines.sort() - ensure_dir(egg_info_dir) - inst_files_path = os.path.join(egg_info_dir, 'installed-files.txt') - with open(inst_files_path, 'w') as f: - f.write('\n'.join(new_lines) + '\n') - - return True diff --git a/env/Lib/site-packages/pip/_internal/operations/install/wheel.py b/env/Lib/site-packages/pip/_internal/operations/install/wheel.py index 10e5b15fd5c0e430bb99c76ac0bfb280d2eee735..a8cd1330f0f73ac76832bdbd6b455b10bd91ba83 100644 --- a/env/Lib/site-packages/pip/_internal/operations/install/wheel.py +++ b/env/Lib/site-packages/pip/_internal/operations/install/wheel.py @@ -22,6 +22,7 @@ from typing import ( BinaryIO, Callable, Dict, + Generator, Iterable, Iterator, List, @@ -35,14 +36,17 @@ from typing import ( ) from zipfile import ZipFile, ZipInfo -from pip._vendor import pkg_resources from pip._vendor.distlib.scripts import ScriptMaker from pip._vendor.distlib.util import get_export_entry -from pip._vendor.pkg_resources import Distribution -from pip._vendor.six import ensure_str, ensure_text, reraise +from pip._vendor.packaging.utils import canonicalize_name from pip._internal.exceptions import InstallationError from pip._internal.locations import get_major_minor_version +from pip._internal.metadata import ( + BaseDistribution, + FilesystemWheel, + get_wheel_distribution, +) from pip._internal.models.direct_url import DIRECT_URL_METADATA_NAME, DirectUrl from pip._internal.models.scheme import SCHEME_KEYS, Scheme from pip._internal.utils.filesystem import adjacent_tmp_file, replace @@ -53,98 +57,76 @@ from pip._internal.utils.unpacking import ( set_extracted_file_to_default_mode_plus_executable, zip_item_is_executable, ) -from pip._internal.utils.wheel import parse_wheel, pkg_resources_distribution_for_wheel +from pip._internal.utils.wheel import parse_wheel if TYPE_CHECKING: from typing import Protocol class File(Protocol): - src_record_path = None # type: RecordPath - dest_path = None # type: str - changed = None # type: bool + src_record_path: "RecordPath" + dest_path: str + changed: bool - def save(self): - # type: () -> None + def save(self) -> None: pass logger = logging.getLogger(__name__) -RecordPath = NewType('RecordPath', str) +RecordPath = NewType("RecordPath", str) InstalledCSVRow = Tuple[RecordPath, str, Union[int, str]] -def rehash(path, blocksize=1 << 20): - # type: (str, int) -> Tuple[str, str] +def rehash(path: str, blocksize: int = 1 << 20) -> Tuple[str, str]: """Return (encoded_digest, length) for path using hashlib.sha256()""" h, length = hash_file(path, blocksize) - digest = 'sha256=' + urlsafe_b64encode( - h.digest() - ).decode('latin1').rstrip('=') + digest = "sha256=" + urlsafe_b64encode(h.digest()).decode("latin1").rstrip("=") return (digest, str(length)) -def csv_io_kwargs(mode): - # type: (str) -> Dict[str, Any] +def csv_io_kwargs(mode: str) -> Dict[str, Any]: """Return keyword arguments to properly open a CSV file in the given mode. """ - return {'mode': mode, 'newline': '', 'encoding': 'utf-8'} + return {"mode": mode, "newline": "", "encoding": "utf-8"} -def fix_script(path): - # type: (str) -> bool +def fix_script(path: str) -> bool: """Replace #!python with #!/path/to/python Return True if file was changed. 
""" # XXX RECORD hashes will need to be updated assert os.path.isfile(path) - with open(path, 'rb') as script: + with open(path, "rb") as script: firstline = script.readline() - if not firstline.startswith(b'#!python'): + if not firstline.startswith(b"#!python"): return False exename = sys.executable.encode(sys.getfilesystemencoding()) - firstline = b'#!' + exename + os.linesep.encode("ascii") + firstline = b"#!" + exename + os.linesep.encode("ascii") rest = script.read() - with open(path, 'wb') as script: + with open(path, "wb") as script: script.write(firstline) script.write(rest) return True -def wheel_root_is_purelib(metadata): - # type: (Message) -> bool +def wheel_root_is_purelib(metadata: Message) -> bool: return metadata.get("Root-Is-Purelib", "").lower() == "true" -def get_entrypoints(distribution): - # type: (Distribution) -> Tuple[Dict[str, str], Dict[str, str]] - # get the entry points and then the script names - try: - console = distribution.get_entry_map('console_scripts') - gui = distribution.get_entry_map('gui_scripts') - except KeyError: - # Our dict-based Distribution raises KeyError if entry_points.txt - # doesn't exist. - return {}, {} - - def _split_ep(s): - # type: (pkg_resources.EntryPoint) -> Tuple[str, str] - """get the string representation of EntryPoint, - remove space and split on '=' - """ - split_parts = str(s).replace(" ", "").split("=") - return split_parts[0], split_parts[1] - - # convert the EntryPoint objects into strings with module:function - console = dict(_split_ep(v) for v in console.values()) - gui = dict(_split_ep(v) for v in gui.values()) - return console, gui - - -def message_about_scripts_not_on_PATH(scripts): - # type: (Sequence[str]) -> Optional[str] +def get_entrypoints(dist: BaseDistribution) -> Tuple[Dict[str, str], Dict[str, str]]: + console_scripts = {} + gui_scripts = {} + for entry_point in dist.iter_entry_points(): + if entry_point.group == "console_scripts": + console_scripts[entry_point.name] = entry_point.value + elif entry_point.group == "gui_scripts": + gui_scripts[entry_point.name] = entry_point.value + return console_scripts, gui_scripts + + +def message_about_scripts_not_on_PATH(scripts: Sequence[str]) -> Optional[str]: """Determine if any scripts are not on PATH and format a warning. Returns a warning message if one or more scripts are not on PATH, otherwise None. @@ -153,7 +135,7 @@ def message_about_scripts_not_on_PATH(scripts): return None # Group scripts by the path they were installed in - grouped_by_dir = collections.defaultdict(set) # type: Dict[str, Set[str]] + grouped_by_dir: Dict[str, Set[str]] = collections.defaultdict(set) for destfile in scripts: parent_dir = os.path.dirname(destfile) script_name = os.path.basename(destfile) @@ -161,23 +143,26 @@ def message_about_scripts_not_on_PATH(scripts): # We don't want to warn for directories that are on PATH. not_warn_dirs = [ - os.path.normcase(i).rstrip(os.sep) for i in - os.environ.get("PATH", "").split(os.pathsep) + os.path.normcase(os.path.normpath(i)).rstrip(os.sep) + for i in os.environ.get("PATH", "").split(os.pathsep) ] # If an executable sits with sys.executable, we don't warn for it. # This covers the case of venv invocations without activating the venv. 
- not_warn_dirs.append(os.path.normcase(os.path.dirname(sys.executable))) - warn_for = { - parent_dir: scripts for parent_dir, scripts in grouped_by_dir.items() - if os.path.normcase(parent_dir) not in not_warn_dirs - } # type: Dict[str, Set[str]] + not_warn_dirs.append( + os.path.normcase(os.path.normpath(os.path.dirname(sys.executable))) + ) + warn_for: Dict[str, Set[str]] = { + parent_dir: scripts + for parent_dir, scripts in grouped_by_dir.items() + if os.path.normcase(os.path.normpath(parent_dir)) not in not_warn_dirs + } if not warn_for: return None # Format a message msg_lines = [] for parent_dir, dir_scripts in warn_for.items(): - sorted_scripts = sorted(dir_scripts) # type: List[str] + sorted_scripts: List[str] = sorted(dir_scripts) if len(sorted_scripts) == 1: start_text = "script {} is".format(sorted_scripts[0]) else: @@ -186,8 +171,9 @@ def message_about_scripts_not_on_PATH(scripts): ) msg_lines.append( - "The {} installed in '{}' which is not on PATH." - .format(start_text, parent_dir) + "The {} installed in '{}' which is not on PATH.".format( + start_text, parent_dir + ) ) last_line_fmt = ( @@ -214,8 +200,9 @@ def message_about_scripts_not_on_PATH(scripts): return "\n".join(msg_lines) -def _normalized_outrows(outrows): - # type: (Iterable[InstalledCSVRow]) -> List[Tuple[str, str, str]] +def _normalized_outrows( + outrows: Iterable[InstalledCSVRow], +) -> List[Tuple[str, str, str]]: """Normalize the given rows of a RECORD file. Items in each row are converted into str. Rows are then sorted to make @@ -235,69 +222,57 @@ def _normalized_outrows(outrows): # For additional background, see-- # https://github.com/pypa/pip/issues/5868 return sorted( - (ensure_str(record_path, encoding='utf-8'), hash_, str(size)) - for record_path, hash_, size in outrows + (record_path, hash_, str(size)) for record_path, hash_, size in outrows ) -def _record_to_fs_path(record_path): - # type: (RecordPath) -> str - return record_path - +def _record_to_fs_path(record_path: RecordPath, lib_dir: str) -> str: + return os.path.join(lib_dir, record_path) -def _fs_to_record_path(path, relative_to=None): - # type: (str, Optional[str]) -> RecordPath - if relative_to is not None: - # On Windows, do not handle relative paths if they belong to different - # logical disks - if os.path.splitdrive(path)[0].lower() == \ - os.path.splitdrive(relative_to)[0].lower(): - path = os.path.relpath(path, relative_to) - path = path.replace(os.path.sep, '/') - return cast('RecordPath', path) +def _fs_to_record_path(path: str, lib_dir: str) -> RecordPath: + # On Windows, do not handle relative paths if they belong to different + # logical disks + if os.path.splitdrive(path)[0].lower() == os.path.splitdrive(lib_dir)[0].lower(): + path = os.path.relpath(path, lib_dir) -def _parse_record_path(record_column): - # type: (str) -> RecordPath - p = ensure_text(record_column, encoding='utf-8') - return cast('RecordPath', p) + path = path.replace(os.path.sep, "/") + return cast("RecordPath", path) def get_csv_rows_for_installed( - old_csv_rows, # type: List[List[str]] - installed, # type: Dict[RecordPath, RecordPath] - changed, # type: Set[RecordPath] - generated, # type: List[str] - lib_dir, # type: str -): - # type: (...) -> List[InstalledCSVRow] + old_csv_rows: List[List[str]], + installed: Dict[RecordPath, RecordPath], + changed: Set[RecordPath], + generated: List[str], + lib_dir: str, +) -> List[InstalledCSVRow]: """ :param installed: A map from archive RECORD path to installation RECORD path. 
""" - installed_rows = [] # type: List[InstalledCSVRow] + installed_rows: List[InstalledCSVRow] = [] for row in old_csv_rows: if len(row) > 3: - logger.warning('RECORD line has more than three elements: %s', row) - old_record_path = _parse_record_path(row[0]) + logger.warning("RECORD line has more than three elements: %s", row) + old_record_path = cast("RecordPath", row[0]) new_record_path = installed.pop(old_record_path, old_record_path) if new_record_path in changed: - digest, length = rehash(_record_to_fs_path(new_record_path)) + digest, length = rehash(_record_to_fs_path(new_record_path, lib_dir)) else: - digest = row[1] if len(row) > 1 else '' - length = row[2] if len(row) > 2 else '' + digest = row[1] if len(row) > 1 else "" + length = row[2] if len(row) > 2 else "" installed_rows.append((new_record_path, digest, length)) for f in generated: path = _fs_to_record_path(f, lib_dir) digest, length = rehash(f) installed_rows.append((path, digest, length)) for installed_record_path in installed.values(): - installed_rows.append((installed_record_path, '', '')) + installed_rows.append((installed_record_path, "", "")) return installed_rows -def get_console_script_specs(console): - # type: (Dict[str, str]) -> List[str] +def get_console_script_specs(console: Dict[str, str]) -> List[str]: """ Given the mapping from entrypoint name to callable, return the relevant console script specs. @@ -340,62 +315,57 @@ def get_console_script_specs(console): # DEFAULT # - The default behavior is to install pip, pipX, pipX.Y, easy_install # and easy_install-X.Y. - pip_script = console.pop('pip', None) + pip_script = console.pop("pip", None) if pip_script: if "ENSUREPIP_OPTIONS" not in os.environ: - scripts_to_generate.append('pip = ' + pip_script) + scripts_to_generate.append("pip = " + pip_script) if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall": scripts_to_generate.append( - 'pip{} = {}'.format(sys.version_info[0], pip_script) + "pip{} = {}".format(sys.version_info[0], pip_script) ) - scripts_to_generate.append( - f'pip{get_major_minor_version()} = {pip_script}' - ) + scripts_to_generate.append(f"pip{get_major_minor_version()} = {pip_script}") # Delete any other versioned pip entry points - pip_ep = [k for k in console if re.match(r'pip(\d(\.\d)?)?$', k)] + pip_ep = [k for k in console if re.match(r"pip(\d+(\.\d+)?)?$", k)] for k in pip_ep: del console[k] - easy_install_script = console.pop('easy_install', None) + easy_install_script = console.pop("easy_install", None) if easy_install_script: if "ENSUREPIP_OPTIONS" not in os.environ: - scripts_to_generate.append( - 'easy_install = ' + easy_install_script - ) + scripts_to_generate.append("easy_install = " + easy_install_script) scripts_to_generate.append( - 'easy_install-{} = {}'.format( + "easy_install-{} = {}".format( get_major_minor_version(), easy_install_script ) ) # Delete any other versioned easy_install entry points easy_install_ep = [ - k for k in console if re.match(r'easy_install(-\d\.\d)?$', k) + k for k in console if re.match(r"easy_install(-\d+\.\d+)?$", k) ] for k in easy_install_ep: del console[k] # Generate the console entry points specified in the wheel - scripts_to_generate.extend(starmap('{} = {}'.format, console.items())) + scripts_to_generate.extend(starmap("{} = {}".format, console.items())) return scripts_to_generate class ZipBackedFile: - def __init__(self, src_record_path, dest_path, zip_file): - # type: (RecordPath, str, ZipFile) -> None + def __init__( + self, src_record_path: RecordPath, dest_path: str, zip_file: 
ZipFile + ) -> None: self.src_record_path = src_record_path self.dest_path = dest_path self._zip_file = zip_file self.changed = False - def _getinfo(self): - # type: () -> ZipInfo + def _getinfo(self) -> ZipInfo: return self._zip_file.getinfo(self.src_record_path) - def save(self): - # type: () -> None + def save(self) -> None: # directory creation is lazy and after file filtering # to ensure we don't install empty dirs; empty dirs can't be # uninstalled. @@ -424,22 +394,19 @@ class ZipBackedFile: class ScriptFile: - def __init__(self, file): - # type: (File) -> None + def __init__(self, file: "File") -> None: self._file = file self.src_record_path = self._file.src_record_path self.dest_path = self._file.dest_path self.changed = False - def save(self): - # type: () -> None + def save(self) -> None: self._file.save() self.changed = fix_script(self.dest_path) class MissingCallableSuffix(InstallationError): - def __init__(self, entry_point): - # type: (str) -> None + def __init__(self, entry_point: str) -> None: super().__init__( "Invalid script entry point: {} - A callable " "suffix is required. Cf https://packaging.python.org/" @@ -448,31 +415,30 @@ class MissingCallableSuffix(InstallationError): ) -def _raise_for_invalid_entrypoint(specification): - # type: (str) -> None +def _raise_for_invalid_entrypoint(specification: str) -> None: entry = get_export_entry(specification) if entry is not None and entry.suffix is None: raise MissingCallableSuffix(str(entry)) class PipScriptMaker(ScriptMaker): - def make(self, specification, options=None): - # type: (str, Dict[str, Any]) -> List[str] + def make( + self, specification: str, options: Optional[Dict[str, Any]] = None + ) -> List[str]: _raise_for_invalid_entrypoint(specification) return super().make(specification, options) def _install_wheel( - name, # type: str - wheel_zip, # type: ZipFile - wheel_path, # type: str - scheme, # type: Scheme - pycompile=True, # type: bool - warn_script_location=True, # type: bool - direct_url=None, # type: Optional[DirectUrl] - requested=False, # type: bool -): - # type: (...) -> None + name: str, + wheel_zip: ZipFile, + wheel_path: str, + scheme: Scheme, + pycompile: bool = True, + warn_script_location: bool = True, + direct_url: Optional[DirectUrl] = None, + requested: bool = False, +) -> None: """Install a wheel. :param name: Name of the project to install @@ -499,33 +465,23 @@ def _install_wheel( # installed = files copied from the wheel to the destination # changed = files changed while installing (scripts #! line typically) # generated = files newly generated during the install (script wrappers) - installed = {} # type: Dict[RecordPath, RecordPath] - changed = set() # type: Set[RecordPath] - generated = [] # type: List[str] + installed: Dict[RecordPath, RecordPath] = {} + changed: Set[RecordPath] = set() + generated: List[str] = [] - def record_installed(srcfile, destfile, modified=False): - # type: (RecordPath, str, bool) -> None + def record_installed( + srcfile: RecordPath, destfile: str, modified: bool = False + ) -> None: """Map archive RECORD paths to installation RECORD paths.""" newpath = _fs_to_record_path(destfile, lib_dir) installed[srcfile] = newpath if modified: - changed.add(_fs_to_record_path(destfile)) - - def all_paths(): - # type: () -> Iterable[RecordPath] - names = wheel_zip.namelist() - # If a flag is set, names may be unicode in Python 2. We convert to - # text explicitly so these are valid for lookup in RECORD. 
- decoded_names = map(ensure_text, names) - for name in decoded_names: - yield cast("RecordPath", name) - - def is_dir_path(path): - # type: (RecordPath) -> bool + changed.add(newpath) + + def is_dir_path(path: RecordPath) -> bool: return path.endswith("/") - def assert_no_path_traversal(dest_dir_path, target_path): - # type: (str, str) -> None + def assert_no_path_traversal(dest_dir_path: str, target_path: str) -> None: if not is_within_directory(dest_dir_path, target_path): message = ( "The wheel {!r} has a file {!r} trying to install" @@ -535,10 +491,10 @@ def _install_wheel( message.format(wheel_path, target_path, dest_dir_path) ) - def root_scheme_file_maker(zip_file, dest): - # type: (ZipFile, str) -> Callable[[RecordPath], File] - def make_root_scheme_file(record_path): - # type: (RecordPath) -> File + def root_scheme_file_maker( + zip_file: ZipFile, dest: str + ) -> Callable[[RecordPath], "File"]: + def make_root_scheme_file(record_path: RecordPath) -> "File": normed_path = os.path.normpath(record_path) dest_path = os.path.join(dest, normed_path) assert_no_path_traversal(dest, dest_path) @@ -546,17 +502,12 @@ def _install_wheel( return make_root_scheme_file - def data_scheme_file_maker(zip_file, scheme): - # type: (ZipFile, Scheme) -> Callable[[RecordPath], File] - scheme_paths = {} - for key in SCHEME_KEYS: - encoded_key = ensure_text(key) - scheme_paths[encoded_key] = ensure_text( - getattr(scheme, key), encoding=sys.getfilesystemencoding() - ) + def data_scheme_file_maker( + zip_file: ZipFile, scheme: Scheme + ) -> Callable[[RecordPath], "File"]: + scheme_paths = {key: getattr(scheme, key) for key in SCHEME_KEYS} - def make_data_scheme_file(record_path): - # type: (RecordPath) -> File + def make_data_scheme_file(record_path: RecordPath) -> "File": normed_path = os.path.normpath(record_path) try: _, scheme_key, dest_subpath = normed_path.split(os.path.sep, 2) @@ -575,9 +526,7 @@ def _install_wheel( "Unknown scheme key used in {}: {} (for file {!r}). 
.data" " directory contents should be in subdirectories named" " with a valid scheme key ({})" - ).format( - wheel_path, scheme_key, record_path, valid_scheme_keys - ) + ).format(wheel_path, scheme_key, record_path, valid_scheme_keys) raise InstallationError(message) dest_path = os.path.join(scheme_path, dest_subpath) @@ -586,30 +535,19 @@ def _install_wheel( return make_data_scheme_file - def is_data_scheme_path(path): - # type: (RecordPath) -> bool + def is_data_scheme_path(path: RecordPath) -> bool: return path.split("/", 1)[0].endswith(".data") - paths = all_paths() + paths = cast(List[RecordPath], wheel_zip.namelist()) file_paths = filterfalse(is_dir_path, paths) - root_scheme_paths, data_scheme_paths = partition( - is_data_scheme_path, file_paths - ) + root_scheme_paths, data_scheme_paths = partition(is_data_scheme_path, file_paths) - make_root_scheme_file = root_scheme_file_maker( - wheel_zip, - ensure_text(lib_dir, encoding=sys.getfilesystemencoding()), - ) - files = map(make_root_scheme_file, root_scheme_paths) + make_root_scheme_file = root_scheme_file_maker(wheel_zip, lib_dir) + files: Iterator[File] = map(make_root_scheme_file, root_scheme_paths) - def is_script_scheme_path(path): - # type: (RecordPath) -> bool + def is_script_scheme_path(path: RecordPath) -> bool: parts = path.split("/", 2) - return ( - len(parts) > 2 and - parts[0].endswith(".data") and - parts[1] == "scripts" - ) + return len(parts) > 2 and parts[0].endswith(".data") and parts[1] == "scripts" other_scheme_paths, script_scheme_paths = partition( is_script_scheme_path, data_scheme_paths @@ -620,32 +558,32 @@ def _install_wheel( files = chain(files, other_scheme_files) # Get the defined entry points - distribution = pkg_resources_distribution_for_wheel( - wheel_zip, name, wheel_path + distribution = get_wheel_distribution( + FilesystemWheel(wheel_path), + canonicalize_name(name), ) console, gui = get_entrypoints(distribution) - def is_entrypoint_wrapper(file): - # type: (File) -> bool + def is_entrypoint_wrapper(file: "File") -> bool: # EP, EP.exe and EP-script.py are scripts generated for # entry point EP by setuptools path = file.dest_path name = os.path.basename(path) - if name.lower().endswith('.exe'): + if name.lower().endswith(".exe"): matchname = name[:-4] - elif name.lower().endswith('-script.py'): + elif name.lower().endswith("-script.py"): matchname = name[:-10] elif name.lower().endswith(".pya"): matchname = name[:-4] else: matchname = name # Ignore setuptools-generated scripts - return (matchname in console or matchname in gui) + return matchname in console or matchname in gui - script_scheme_files = map(make_data_scheme_file, script_scheme_paths) - script_scheme_files = filterfalse( - is_entrypoint_wrapper, script_scheme_files + script_scheme_files: Iterator[File] = map( + make_data_scheme_file, script_scheme_paths ) + script_scheme_files = filterfalse(is_entrypoint_wrapper, script_scheme_files) script_scheme_files = map(ScriptFile, script_scheme_files) files = chain(files, script_scheme_files) @@ -653,8 +591,7 @@ def _install_wheel( file.save() record_installed(file.src_record_path, file.dest_path, file.changed) - def pyc_source_file_paths(): - # type: () -> Iterator[str] + def pyc_source_file_paths() -> Generator[str, None, None]: # We de-duplicate installation paths, since there can be overlap (e.g. # file in .data maps to same location as file in wheel root). 
# Sorting installation paths makes it easier to reproduce and debug @@ -663,30 +600,21 @@ def _install_wheel( full_installed_path = os.path.join(lib_dir, installed_path) if not os.path.isfile(full_installed_path): continue - if not full_installed_path.endswith('.py'): + if not full_installed_path.endswith(".py"): continue yield full_installed_path - def pyc_output_path(path): - # type: (str) -> str - """Return the path the pyc file would have been written to. - """ + def pyc_output_path(path: str) -> str: + """Return the path the pyc file would have been written to.""" return importlib.util.cache_from_source(path) # Compile all of the pyc files for the installed files if pycompile: with captured_stdout() as stdout: with warnings.catch_warnings(): - warnings.filterwarnings('ignore') + warnings.filterwarnings("ignore") for path in pyc_source_file_paths(): - # Python 2's `compileall.compile_file` requires a str in - # error cases, so we must convert to the native type. - path_arg = ensure_str( - path, encoding=sys.getfilesystemencoding() - ) - success = compileall.compile_file( - path_arg, force=True, quiet=True - ) + success = compileall.compile_file(path, force=True, quiet=True) if success: pyc_path = pyc_output_path(path) assert os.path.exists(pyc_path) @@ -705,7 +633,7 @@ def _install_wheel( # Ensure we don't generate any variants for scripts because this is almost # never what somebody wants. # See https://bitbucket.org/pypa/distlib/issue/35/ - maker.variants = {''} + maker.variants = {""} # This is required because otherwise distlib creates scripts that are not # executable. @@ -715,14 +643,12 @@ def _install_wheel( # Generate the console and GUI entry points specified in the wheel scripts_to_generate = get_console_script_specs(console) - gui_scripts_to_generate = list(starmap('{} = {}'.format, gui.items())) + gui_scripts_to_generate = list(starmap("{} = {}".format, gui.items())) generated_console_scripts = maker.make_multiple(scripts_to_generate) generated.extend(generated_console_scripts) - generated.extend( - maker.make_multiple(gui_scripts_to_generate, {'gui': True}) - ) + generated.extend(maker.make_multiple(gui_scripts_to_generate, {"gui": True})) if warn_script_location: msg = message_about_scripts_not_on_PATH(generated_console_scripts) @@ -732,8 +658,7 @@ def _install_wheel( generated_file_mode = 0o666 & ~current_umask() @contextlib.contextmanager - def _generate_file(path, **kwargs): - # type: (str, **Any) -> Iterator[BinaryIO] + def _generate_file(path: str, **kwargs: Any) -> Generator[BinaryIO, None, None]: with adjacent_tmp_file(path, **kwargs) as f: yield f os.chmod(f.name, generated_file_mode) @@ -742,9 +667,9 @@ def _install_wheel( dest_info_dir = os.path.join(lib_dir, info_dir) # Record pip as the installer - installer_path = os.path.join(dest_info_dir, 'INSTALLER') + installer_path = os.path.join(dest_info_dir, "INSTALLER") with _generate_file(installer_path) as installer_file: - installer_file.write(b'pip\n') + installer_file.write(b"pip\n") generated.append(installer_path) # Record the PEP 610 direct URL reference @@ -756,12 +681,12 @@ def _install_wheel( # Record the REQUESTED file if requested: - requested_path = os.path.join(dest_info_dir, 'REQUESTED') + requested_path = os.path.join(dest_info_dir, "REQUESTED") with open(requested_path, "wb"): pass generated.append(requested_path) - record_text = distribution.get_metadata('RECORD') + record_text = distribution.read_text("RECORD") record_rows = list(csv.reader(record_text.splitlines())) rows = 
get_csv_rows_for_installed( @@ -769,42 +694,38 @@ def _install_wheel( installed=installed, changed=changed, generated=generated, - lib_dir=lib_dir) + lib_dir=lib_dir, + ) # Record details of all files installed - record_path = os.path.join(dest_info_dir, 'RECORD') + record_path = os.path.join(dest_info_dir, "RECORD") - with _generate_file(record_path, **csv_io_kwargs('w')) as record_file: - # The type mypy infers for record_file is different for Python 3 - # (typing.IO[Any]) and Python 2 (typing.BinaryIO). We explicitly - # cast to typing.IO[str] as a workaround. - writer = csv.writer(cast('IO[str]', record_file)) + with _generate_file(record_path, **csv_io_kwargs("w")) as record_file: + # Explicitly cast to typing.IO[str] as a workaround for the mypy error: + # "writer" has incompatible type "BinaryIO"; expected "_Writer" + writer = csv.writer(cast("IO[str]", record_file)) writer.writerows(_normalized_outrows(rows)) @contextlib.contextmanager -def req_error_context(req_description): - # type: (str) -> Iterator[None] +def req_error_context(req_description: str) -> Generator[None, None, None]: try: yield except InstallationError as e: message = "For req: {}. {}".format(req_description, e.args[0]) - reraise( - InstallationError, InstallationError(message), sys.exc_info()[2] - ) + raise InstallationError(message) from e def install_wheel( - name, # type: str - wheel_path, # type: str - scheme, # type: Scheme - req_description, # type: str - pycompile=True, # type: bool - warn_script_location=True, # type: bool - direct_url=None, # type: Optional[DirectUrl] - requested=False, # type: bool -): - # type: (...) -> None + name: str, + wheel_path: str, + scheme: Scheme, + req_description: str, + pycompile: bool = True, + warn_script_location: bool = True, + direct_url: Optional[DirectUrl] = None, + requested: bool = False, +) -> None: with ZipFile(wheel_path, allowZip64=True) as z: with req_error_context(req_description): _install_wheel( diff --git a/env/Lib/site-packages/pip/_internal/operations/prepare.py b/env/Lib/site-packages/pip/_internal/operations/prepare.py index 3d074f9f62911e5278d01fa3f088141ce0b03e27..cb121bcb25245ac039e8325413c4f2be3ad71d42 100644 --- a/env/Lib/site-packages/pip/_internal/operations/prepare.py +++ b/env/Lib/site-packages/pip/_internal/operations/prepare.py @@ -8,10 +8,9 @@ import logging import mimetypes import os import shutil -from typing import Dict, Iterable, List, Optional, Tuple +from typing import Dict, Iterable, List, Optional from pip._vendor.packaging.utils import canonicalize_name -from pip._vendor.pkg_resources import Distribution from pip._internal.distributions import make_distribution_for_install_requirement from pip._internal.distributions.installed import InstalledDistribution @@ -20,11 +19,14 @@ from pip._internal.exceptions import ( HashMismatch, HashUnpinned, InstallationError, + MetadataInconsistent, NetworkConnectionError, PreviousBuildDirError, VcsHashUnsupported, ) from pip._internal.index.package_finder import PackageFinder +from pip._internal.metadata import BaseDistribution, get_metadata_distribution +from pip._internal.models.direct_url import ArchiveInfo from pip._internal.models.link import Link from pip._internal.models.wheel import Wheel from pip._internal.network.download import BatchDownloader, Downloader @@ -33,13 +35,20 @@ from pip._internal.network.lazy_wheel import ( dist_from_wheel_url, ) from pip._internal.network.session import PipSession +from pip._internal.operations.build.build_tracker import BuildTracker from 
pip._internal.req.req_install import InstallRequirement -from pip._internal.req.req_tracker import RequirementTracker -from pip._internal.utils.deprecation import deprecated -from pip._internal.utils.filesystem import copy2_fixed +from pip._internal.utils.direct_url_helpers import ( + direct_url_for_editable, + direct_url_from_link, +) from pip._internal.utils.hashes import Hashes, MissingHashes from pip._internal.utils.logging import indent_log -from pip._internal.utils.misc import display_path, hide_url, rmtree +from pip._internal.utils.misc import ( + display_path, + hash_file, + hide_url, + is_installable_dir, +) from pip._internal.utils.temp_dir import TempDirectory from pip._internal.utils.unpacking import unpack_file from pip._internal.vcs import vcs @@ -48,30 +57,29 @@ logger = logging.getLogger(__name__) def _get_prepared_distribution( - req, # type: InstallRequirement - req_tracker, # type: RequirementTracker - finder, # type: PackageFinder - build_isolation, # type: bool -): - # type: (...) -> Distribution + req: InstallRequirement, + build_tracker: BuildTracker, + finder: PackageFinder, + build_isolation: bool, + check_build_deps: bool, +) -> BaseDistribution: """Prepare a distribution for installation.""" abstract_dist = make_distribution_for_install_requirement(req) - with req_tracker.track(req): - abstract_dist.prepare_distribution_metadata(finder, build_isolation) - return abstract_dist.get_pkg_resources_distribution() + with build_tracker.track(req): + abstract_dist.prepare_distribution_metadata( + finder, build_isolation, check_build_deps + ) + return abstract_dist.get_metadata_distribution() -def unpack_vcs_link(link, location): - # type: (Link, str) -> None +def unpack_vcs_link(link: Link, location: str, verbosity: int) -> None: vcs_backend = vcs.get_backend_for_scheme(link.scheme) assert vcs_backend is not None - vcs_backend.unpack(location, url=hide_url(link.url)) + vcs_backend.unpack(location, url=hide_url(link.url), verbosity=verbosity) class File: - - def __init__(self, path, content_type): - # type: (str, Optional[str]) -> None + def __init__(self, path: str, content_type: Optional[str]) -> None: self.path = path if content_type is None: self.content_type = mimetypes.guess_type(path)[0] @@ -80,19 +88,16 @@ class File: def get_http_url( - link, # type: Link - download, # type: Downloader - download_dir=None, # type: Optional[str] - hashes=None, # type: Optional[Hashes] -): - # type: (...) -> File + link: Link, + download: Downloader, + download_dir: Optional[str] = None, + hashes: Optional[Hashes] = None, +) -> File: temp_dir = TempDirectory(kind="unpack", globally_managed=True) # If a download dir is specified, is the file already downloaded there? already_downloaded_path = None if download_dir: - already_downloaded_path = _check_download_dir( - link, download_dir, hashes - ) + already_downloaded_path = _check_download_dir(link, download_dir, hashes) if already_downloaded_path: from_path = already_downloaded_path @@ -106,72 +111,14 @@ def get_http_url( return File(from_path, content_type) -def _copy2_ignoring_special_files(src, dest): - # type: (str, str) -> None - """Copying special files is not supported, but as a convenience to users - we skip errors copying them. This supports tools that may create e.g. - socket files in the project source directory. - """ - try: - copy2_fixed(src, dest) - except shutil.SpecialFileError as e: - # SpecialFileError may be raised due to either the source or - # destination. 
If the destination was the cause then we would actually - # care, but since the destination directory is deleted prior to - # copy we ignore all of them assuming it is caused by the source. - logger.warning( - "Ignoring special file error '%s' encountered copying %s to %s.", - str(e), - src, - dest, - ) - - -def _copy_source_tree(source, target): - # type: (str, str) -> None - target_abspath = os.path.abspath(target) - target_basename = os.path.basename(target_abspath) - target_dirname = os.path.dirname(target_abspath) - - def ignore(d, names): - # type: (str, List[str]) -> List[str] - skipped = [] # type: List[str] - if d == source: - # Pulling in those directories can potentially be very slow, - # exclude the following directories if they appear in the top - # level dir (and only it). - # See discussion at https://github.com/pypa/pip/pull/6770 - skipped += ['.tox', '.nox'] - if os.path.abspath(d) == target_dirname: - # Prevent an infinite recursion if the target is in source. - # This can happen when TMPDIR is set to ${PWD}/... - # and we copy PWD to TMPDIR. - skipped += [target_basename] - return skipped - - shutil.copytree( - source, - target, - ignore=ignore, - symlinks=True, - copy_function=_copy2_ignoring_special_files, - ) - - def get_file_url( - link, # type: Link - download_dir=None, # type: Optional[str] - hashes=None # type: Optional[Hashes] -): - # type: (...) -> File - """Get file and optionally check its hash. - """ + link: Link, download_dir: Optional[str] = None, hashes: Optional[Hashes] = None +) -> File: + """Get file and optionally check its hash.""" # If a download dir is specified, is the file already there and valid? already_downloaded_path = None if download_dir: - already_downloaded_path = _check_download_dir( - link, download_dir, hashes - ) + already_downloaded_path = _check_download_dir(link, download_dir, hashes) if already_downloaded_path: from_path = already_downloaded_path @@ -189,13 +136,13 @@ def get_file_url( def unpack_url( - link, # type: Link - location, # type: str - download, # type: Downloader - download_dir=None, # type: Optional[str] - hashes=None, # type: Optional[Hashes] -): - # type: (...) -> Optional[File] + link: Link, + location: str, + download: Downloader, + verbosity: int, + download_dir: Optional[str] = None, + hashes: Optional[Hashes] = None, +) -> Optional[File]: """Unpack link into location, downloading if required. :param hashes: A Hashes object, one of whose embedded hashes must match, @@ -205,30 +152,10 @@ def unpack_url( """ # non-editable vcs urls if link.is_vcs: - unpack_vcs_link(link, location) + unpack_vcs_link(link, location, verbosity=verbosity) return None - # Once out-of-tree-builds are no longer supported, could potentially - # replace the below condition with `assert not link.is_existing_dir` - # - unpack_url does not need to be called for in-tree-builds. - # - # As further cleanup, _copy_source_tree and accompanying tests can - # be removed. - if link.is_existing_dir(): - deprecated( - "A future pip version will change local packages to be built " - "in-place without first copying to a temporary directory. 
" - "We recommend you use --use-feature=in-tree-build to test " - "your packages with this new behavior before it becomes the " - "default.\n", - replacement=None, - gone_in="21.3", - issue=7555 - ) - if os.path.isdir(location): - rmtree(location) - _copy_source_tree(link.file_path, location) - return None + assert not link.is_existing_dir() # file urls if link.is_file: @@ -251,10 +178,14 @@ def unpack_url( return file -def _check_download_dir(link, download_dir, hashes): - # type: (Link, str, Optional[Hashes]) -> Optional[str] - """ Check download_dir for previously downloaded file with correct hash - If a correct file is found return its path else None +def _check_download_dir( + link: Link, + download_dir: str, + hashes: Optional[Hashes], + warn_on_hash_mismatch: bool = True, +) -> Optional[str]: + """Check download_dir for previously downloaded file with correct hash + If a correct file is found return its path else None """ download_path = os.path.join(download_dir, link.filename) @@ -262,46 +193,46 @@ def _check_download_dir(link, download_dir, hashes): return None # If already downloaded, does its hash match? - logger.info('File was already downloaded %s', download_path) + logger.info("File was already downloaded %s", download_path) if hashes: try: hashes.check_against_path(download_path) except HashMismatch: - logger.warning( - 'Previously-downloaded file %s has bad hash. ' - 'Re-downloading.', - download_path - ) + if warn_on_hash_mismatch: + logger.warning( + "Previously-downloaded file %s has bad hash. Re-downloading.", + download_path, + ) os.unlink(download_path) return None return download_path class RequirementPreparer: - """Prepares a Requirement - """ + """Prepares a Requirement""" def __init__( self, - build_dir, # type: str - download_dir, # type: Optional[str] - src_dir, # type: str - build_isolation, # type: bool - req_tracker, # type: RequirementTracker - session, # type: PipSession - progress_bar, # type: str - finder, # type: PackageFinder - require_hashes, # type: bool - use_user_site, # type: bool - lazy_wheel, # type: bool - in_tree_build, # type: bool - ): - # type: (...) -> None + build_dir: str, + download_dir: Optional[str], + src_dir: str, + build_isolation: bool, + check_build_deps: bool, + build_tracker: BuildTracker, + session: PipSession, + progress_bar: str, + finder: PackageFinder, + require_hashes: bool, + use_user_site: bool, + lazy_wheel: bool, + verbosity: int, + legacy_resolver: bool, + ) -> None: super().__init__() self.src_dir = src_dir self.build_dir = build_dir - self.req_tracker = req_tracker + self.build_tracker = build_tracker self._session = session self._download = Downloader(session, progress_bar) self._batch_download = BatchDownloader(session, progress_bar) @@ -314,6 +245,9 @@ class RequirementPreparer: # Is build isolation allowed? self.build_isolation = build_isolation + # Should check build dependencies? + self.check_build_deps = check_build_deps + # Should hash-checking be required? self.require_hashes = require_hashes @@ -323,35 +257,48 @@ class RequirementPreparer: # Should wheels be downloaded lazily? self.use_lazy_wheel = lazy_wheel - # Should in-tree builds be used for local paths? - self.in_tree_build = in_tree_build + # How verbose should underlying tooling be? + self.verbosity = verbosity + + # Are we using the legacy resolver? 
+ self.legacy_resolver = legacy_resolver - # Memoized downloaded files, as mapping of url: (path, mime type) - self._downloaded = {} # type: Dict[str, Tuple[str, str]] + # Memoized downloaded files, as mapping of url: path. + self._downloaded: Dict[str, str] = {} # Previous "header" printed for a link-based InstallRequirement self._previous_requirement_header = ("", "") - def _log_preparing_link(self, req): - # type: (InstallRequirement) -> None + def _log_preparing_link(self, req: InstallRequirement) -> None: """Provide context for the requirement being prepared.""" - if req.link.is_file and not req.original_link_is_in_wheel_cache: + if req.link.is_file and not req.is_wheel_from_cache: message = "Processing %s" information = str(display_path(req.link.file_path)) else: message = "Collecting %s" information = str(req.req or req) + # If we used req.req, inject requirement source if available (this + # would already be included if we used req directly) + if req.req and req.comes_from: + if isinstance(req.comes_from, str): + comes_from: Optional[str] = req.comes_from + else: + comes_from = req.comes_from.from_path() + if comes_from: + information += f" (from {comes_from})" + if (message, information) != self._previous_requirement_header: self._previous_requirement_header = (message, information) logger.info(message, information) - if req.original_link_is_in_wheel_cache: + if req.is_wheel_from_cache: with indent_log(): logger.info("Using cached %s", req.link.filename) - def _ensure_link_req_src_dir(self, req, parallel_builds): - # type: (InstallRequirement, bool) -> None + def _ensure_link_req_src_dir( + self, req: InstallRequirement, parallel_builds: bool + ) -> None: """Ensure source_dir of a linked InstallRequirement.""" # Since source_dir is only set for editable requirements. if req.link.is_wheel: @@ -359,7 +306,7 @@ class RequirementPreparer: # directory. return assert req.source_dir is None - if req.link.is_existing_dir() and self.in_tree_build: + if req.link.is_existing_dir(): # build local directories in-tree req.source_dir = req.link.file_path return @@ -376,7 +323,8 @@ class RequirementPreparer: # installation. # FIXME: this won't upgrade when there's an existing # package unpacked in `req.source_dir` - if os.path.exists(os.path.join(req.source_dir, 'setup.py')): + # TODO: this check is now probably dead code + if is_installable_dir(req.source_dir): raise PreviousBuildDirError( "pip can't proceed with requirements '{}' due to a" "pre-existing build directory ({}). This is likely " @@ -385,8 +333,7 @@ class RequirementPreparer: "Please delete it and try again.".format(req, req.source_dir) ) - def _get_linked_req_hashes(self, req): - # type: (InstallRequirement) -> Hashes + def _get_linked_req_hashes(self, req: InstallRequirement) -> Hashes: # By the time this is called, the requirement's link should have # been checked so we can tell what kind of requirements req is # and raise some more informative errors than otherwise. @@ -409,7 +356,7 @@ class RequirementPreparer: # a surprising hash mismatch in the future. # file:/// URLs aren't pinnable, so don't complain about them # not being pinned. - if req.original_link is None and not req.is_pinned: + if not req.is_direct and not req.is_pinned: raise HashUnpinned() # If known-good hashes are missing for this requirement, @@ -418,18 +365,77 @@ class RequirementPreparer: # showing the user what the hash should be. 
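# Conceptually the hash gate is "at least one allowed digest must match".
# A stdlib-only sketch of that rule; `expected` is a hypothetical mapping
# such as {"sha256": ["<hexdigest>", ...]}, not pip's Hashes object:
import hashlib

def matches_one_of(path, expected):
    for algo, allowed in expected.items():
        h = hashlib.new(algo)
        with open(path, "rb") as f:
            # Stream the file so large artifacts are not read into memory.
            for chunk in iter(lambda: f.read(1 << 16), b""):
                h.update(chunk)
        if h.hexdigest() in allowed:
            return True
    return False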
return req.hashes(trust_internet=False) or MissingHashes() - def _fetch_metadata_using_lazy_wheel(self, link): - # type: (Link) -> Optional[Distribution] - """Fetch metadata using lazy wheel, if possible.""" - if not self.use_lazy_wheel: + def _fetch_metadata_only( + self, + req: InstallRequirement, + ) -> Optional[BaseDistribution]: + if self.legacy_resolver: + logger.debug( + "Metadata-only fetching is not used in the legacy resolver", + ) return None if self.require_hashes: - logger.debug('Lazy wheel is not used as hash checking is required') + logger.debug( + "Metadata-only fetching is not used as hash checking is required", + ) + return None + # Try PEP 658 metadata first, then fall back to lazy wheel if unavailable. + return self._fetch_metadata_using_link_data_attr( + req + ) or self._fetch_metadata_using_lazy_wheel(req.link) + + def _fetch_metadata_using_link_data_attr( + self, + req: InstallRequirement, + ) -> Optional[BaseDistribution]: + """Fetch metadata from the data-dist-info-metadata attribute, if possible.""" + # (1) Get the link to the metadata file, if provided by the backend. + metadata_link = req.link.metadata_link() + if metadata_link is None: + return None + assert req.req is not None + logger.info( + "Obtaining dependency information for %s from %s", + req.req, + metadata_link, + ) + # (2) Download the contents of the METADATA file, separate from the dist itself. + metadata_file = get_http_url( + metadata_link, + self._download, + hashes=metadata_link.as_hashes(), + ) + with open(metadata_file.path, "rb") as f: + metadata_contents = f.read() + # (3) Generate a dist just from those file contents. + metadata_dist = get_metadata_distribution( + metadata_contents, + req.link.filename, + req.req.name, + ) + # (4) Ensure the Name: field from the METADATA file matches the name from the + # install requirement. + # + # NB: raw_name will fall back to the name from the install requirement if + # the Name: field is not present, but it's noted in the raw_name docstring + # that that should NEVER happen anyway. + if canonicalize_name(metadata_dist.raw_name) != canonicalize_name(req.req.name): + raise MetadataInconsistent( + req, "Name", req.req.name, metadata_dist.raw_name + ) + return metadata_dist + + def _fetch_metadata_using_lazy_wheel( + self, + link: Link, + ) -> Optional[BaseDistribution]: + """Fetch metadata using lazy wheel, if possible.""" + # --use-feature=fast-deps must be provided. + if not self.use_lazy_wheel: return None if link.is_file or not link.is_wheel: logger.debug( - 'Lazy wheel is not used as ' - '%r does not points to a remote wheel', + "Lazy wheel is not used as %r does not point to a remote wheel", link, ) return None @@ -437,22 +443,22 @@ class RequirementPreparer: wheel = Wheel(link.filename) name = canonicalize_name(wheel.name) logger.info( - 'Obtaining dependency information from %s %s', - name, wheel.version, + "Obtaining dependency information from %s %s", + name, + wheel.version, ) - url = link.url.split('#', 1)[0] + url = link.url.split("#", 1)[0] try: return dist_from_wheel_url(name, url, self._session) except HTTPRangeRequestUnsupported: - logger.debug('%s does not support range requests', url) + logger.debug("%s does not support range requests", url) return None def _complete_partial_requirements( self, - partially_downloaded_reqs, # type: Iterable[InstallRequirement] - parallel_builds=False, # type: bool - ): - # type: (...) 
-> None
+        partially_downloaded_reqs: Iterable[InstallRequirement],
+        parallel_builds: bool = False,
+    ) -> None:
        """Download any requirements which were only fetched by metadata."""
        # Download to a temporary directory. These will be copied over as
        # needed for downstream 'download', 'wheel', and 'install' commands.
@@ -461,7 +467,7 @@ class RequirementPreparer:
        # Map each link to the requirement that owns it. This allows us to set
        # `req.local_file_path` on the appropriate requirement after passing
        # all the links at once into BatchDownloader.
-        links_to_fully_download = {}  # type: Dict[Link, InstallRequirement]
+        links_to_fully_download: Dict[Link, InstallRequirement] = {}
        for req in partially_downloaded_reqs:
            assert req.link
            links_to_fully_download[req.link] = req
@@ -474,41 +480,66 @@
            logger.debug("Downloading link %s to %s", link, filepath)
            req = links_to_fully_download[link]
            req.local_file_path = filepath
+            # TODO: This needs fixing for sdists
+            # This is an emergency fix for #11847, which reports that
+            # distributions get downloaded twice when metadata is loaded
+            # from a PEP 658 standalone metadata file. Setting _downloaded
+            # fixes this for wheels, but breaks the sdist case (tests
+            # test_download_metadata). As PyPI is currently only serving
+            # metadata for wheels, this is not an immediate issue.
+            # Fixing the problem properly looks like it will require a
+            # complete refactoring of the `prepare_linked_requirements_more`
+            # logic, and I haven't a clue where to start on that, so for now
+            # I have fixed the issue *just* for wheels.
+            if req.is_wheel:
+                self._downloaded[req.link.url] = filepath
        # This step is necessary to ensure all lazy wheels are processed
        # successfully by the 'download', 'wheel', and 'install' commands.
        for req in partially_downloaded_reqs:
            self._prepare_linked_requirement(req, parallel_builds)
-    def prepare_linked_requirement(self, req, parallel_builds=False):
-        # type: (InstallRequirement, bool) -> Distribution
+    def prepare_linked_requirement(
+        self, req: InstallRequirement, parallel_builds: bool = False
+    ) -> BaseDistribution:
        """Prepare a requirement to be obtained from req.link."""
        assert req.link
-        link = req.link
        self._log_preparing_link(req)
        with indent_log():
            # Check if the relevant file is already available
            # in the download directory
            file_path = None
-            if self.download_dir is not None and link.is_wheel:
+            if self.download_dir is not None and req.link.is_wheel:
                hashes = self._get_linked_req_hashes(req)
-                file_path = _check_download_dir(req.link, self.download_dir, hashes)
+                file_path = _check_download_dir(
+                    req.link,
+                    self.download_dir,
+                    hashes,
+                    # When a locally built wheel has been found in cache, we don't warn
+                    # about re-downloading when the already downloaded wheel hash does
+                    # not match. This is because the hash must be checked against the
+                    # original link, not the cached link. In that case the already
+                    # downloaded file will be removed and re-fetched from cache (which
+                    # implies a hash check against the cache entry's origin.json).
+ warn_on_hash_mismatch=not req.is_wheel_from_cache, + ) if file_path is not None: # The file is already available, so mark it as downloaded - self._downloaded[req.link.url] = file_path, None + self._downloaded[req.link.url] = file_path else: # The file is not available, attempt to fetch only metadata - wheel_dist = self._fetch_metadata_using_lazy_wheel(link) - if wheel_dist is not None: + metadata_dist = self._fetch_metadata_only(req) + if metadata_dist is not None: req.needs_more_preparation = True - return wheel_dist + return metadata_dist # None of the optimizations worked, fully prepare the requirement return self._prepare_linked_requirement(req, parallel_builds) - def prepare_linked_requirements_more(self, reqs, parallel_builds=False): - # type: (Iterable[InstallRequirement], bool) -> None + def prepare_linked_requirements_more( + self, reqs: Iterable[InstallRequirement], parallel_builds: bool = False + ) -> None: """Prepare linked requirements more, if needed.""" reqs = [req for req in reqs if req.needs_more_preparation] for req in reqs: @@ -517,12 +548,12 @@ class RequirementPreparer: hashes = self._get_linked_req_hashes(req) file_path = _check_download_dir(req.link, self.download_dir, hashes) if file_path is not None: - self._downloaded[req.link.url] = file_path, None + self._downloaded[req.link.url] = file_path req.needs_more_preparation = False # Prepare requirements we found were already downloaded for some # reason. The other downloads will be completed separately. - partially_downloaded_reqs = [] # type: List[InstallRequirement] + partially_downloaded_reqs: List[InstallRequirement] = [] for req in reqs: if req.needs_more_preparation: partially_downloaded_reqs.append(req) @@ -532,35 +563,87 @@ class RequirementPreparer: # TODO: separate this part out from RequirementPreparer when the v1 # resolver can be removed! self._complete_partial_requirements( - partially_downloaded_reqs, parallel_builds=parallel_builds, + partially_downloaded_reqs, + parallel_builds=parallel_builds, ) - def _prepare_linked_requirement(self, req, parallel_builds): - # type: (InstallRequirement, bool) -> Distribution + def _prepare_linked_requirement( + self, req: InstallRequirement, parallel_builds: bool + ) -> BaseDistribution: assert req.link link = req.link - self._ensure_link_req_src_dir(req, parallel_builds) hashes = self._get_linked_req_hashes(req) - if link.is_existing_dir() and self.in_tree_build: + if hashes and req.is_wheel_from_cache: + assert req.download_info is not None + assert link.is_wheel + assert link.is_file + # We need to verify hashes, and we have found the requirement in the cache + # of locally built wheels. + if ( + isinstance(req.download_info.info, ArchiveInfo) + and req.download_info.info.hashes + and hashes.has_one_of(req.download_info.info.hashes) + ): + # At this point we know the requirement was built from a hashable source + # artifact, and we verified that the cache entry's hash of the original + # artifact matches one of the hashes we expect. We don't verify hashes + # against the cached wheel, because the wheel is not the original. + hashes = None + else: + logger.warning( + "The hashes of the source archive found in cache entry " + "don't match, ignoring cached built wheel " + "and re-downloading source." 
+ ) + req.link = req.cached_wheel_source_link + link = req.link + + self._ensure_link_req_src_dir(req, parallel_builds) + + if link.is_existing_dir(): local_file = None elif link.url not in self._downloaded: try: local_file = unpack_url( - link, req.source_dir, self._download, - self.download_dir, hashes + link, + req.source_dir, + self._download, + self.verbosity, + self.download_dir, + hashes, ) except NetworkConnectionError as exc: raise InstallationError( - 'Could not install requirement {} because of HTTP ' - 'error {} for URL {}'.format(req, exc, link) + "Could not install requirement {} because of HTTP " + "error {} for URL {}".format(req, exc, link) ) else: - file_path, content_type = self._downloaded[link.url] + file_path = self._downloaded[link.url] if hashes: hashes.check_against_path(file_path) - local_file = File(file_path, content_type) + local_file = File(file_path, content_type=None) + + # If download_info is set, we got it from the wheel cache. + if req.download_info is None: + # Editables don't go through this function (see + # prepare_editable_requirement). + assert not req.editable + req.download_info = direct_url_from_link(link, req.source_dir) + # Make sure we have a hash in download_info. If we got it as part of the + # URL, it will have been verified and we can rely on it. Otherwise we + # compute it from the downloaded file. + # FIXME: https://github.com/pypa/pip/issues/11943 + if ( + isinstance(req.download_info.info, ArchiveInfo) + and not req.download_info.info.hashes + and local_file + ): + hash = hash_file(local_file.path)[0].hexdigest() + # We populate info.hash for backward compatibility. + # This will automatically populate info.hashes. + req.download_info.info.hash = f"sha256={hash}" # For use in later processing, # preserve the file path on the requirement. @@ -568,12 +651,15 @@ class RequirementPreparer: req.local_file_path = local_file.path dist = _get_prepared_distribution( - req, self.req_tracker, self.finder, self.build_isolation, + req, + self.build_tracker, + self.finder, + self.build_isolation, + self.check_build_deps, ) return dist - def save_linked_requirement(self, req): - # type: (InstallRequirement) -> None + def save_linked_requirement(self, req: InstallRequirement) -> None: assert self.download_dir is not None assert req.link is not None link = req.link @@ -584,8 +670,9 @@ class RequirementPreparer: if link.is_existing_dir(): logger.debug( - 'Not copying link to destination directory ' - 'since it is a directory: %s', link, + "Not copying link to destination directory " + "since it is a directory: %s", + link, ) return if req.local_file_path is None: @@ -596,31 +683,35 @@ class RequirementPreparer: if not os.path.exists(download_location): shutil.copy(req.local_file_path, download_location) download_path = display_path(download_location) - logger.info('Saved %s', download_path) + logger.info("Saved %s", download_path) def prepare_editable_requirement( self, - req, # type: InstallRequirement - ): - # type: (...) 
-> Distribution - """Prepare an editable requirement - """ + req: InstallRequirement, + ) -> BaseDistribution: + """Prepare an editable requirement.""" assert req.editable, "cannot prepare a non-editable req as editable" - logger.info('Obtaining %s', req) + logger.info("Obtaining %s", req) with indent_log(): if self.require_hashes: raise InstallationError( - 'The editable requirement {} cannot be installed when ' - 'requiring hashes, because there is no single file to ' - 'hash.'.format(req) + "The editable requirement {} cannot be installed when " + "requiring hashes, because there is no single file to " + "hash.".format(req) ) req.ensure_has_source_dir(self.src_dir) req.update_editable() + assert req.source_dir + req.download_info = direct_url_for_editable(req.unpacked_source_directory) dist = _get_prepared_distribution( - req, self.req_tracker, self.finder, self.build_isolation, + req, + self.build_tracker, + self.finder, + self.build_isolation, + self.check_build_deps, ) req.check_if_exists(self.use_user_site) @@ -629,27 +720,24 @@ class RequirementPreparer: def prepare_installed_requirement( self, - req, # type: InstallRequirement - skip_reason # type: str - ): - # type: (...) -> Distribution - """Prepare an already-installed requirement - """ + req: InstallRequirement, + skip_reason: str, + ) -> BaseDistribution: + """Prepare an already-installed requirement.""" assert req.satisfied_by, "req should have been satisfied but isn't" assert skip_reason is not None, ( "did not get skip reason skipped but req.satisfied_by " "is set to {}".format(req.satisfied_by) ) logger.info( - 'Requirement %s: %s (%s)', - skip_reason, req, req.satisfied_by.version + "Requirement %s: %s (%s)", skip_reason, req, req.satisfied_by.version ) with indent_log(): if self.require_hashes: logger.debug( - 'Since it is already installed, we are trusting this ' - 'package without checking its hash. To ensure a ' - 'completely repeatable environment, install into an ' - 'empty virtualenv.' + "Since it is already installed, we are trusting this " + "package without checking its hash. To ensure a " + "completely repeatable environment, install into an " + "empty virtualenv." 
) - return InstalledDistribution(req).get_pkg_resources_distribution() + return InstalledDistribution(req).get_metadata_distribution() diff --git a/env/Lib/site-packages/pip/_internal/pyproject.py b/env/Lib/site-packages/pip/_internal/pyproject.py index 9016d355f8790e74b23c9681ba44c41ca1a7e4e5..eb8e12b2dec992dc38c87510055d6ccb5f66c828 100644 --- a/env/Lib/site-packages/pip/_internal/pyproject.py +++ b/env/Lib/site-packages/pip/_internal/pyproject.py @@ -1,38 +1,34 @@ +import importlib.util import os from collections import namedtuple from typing import Any, List, Optional -from pip._vendor import toml +from pip._vendor import tomli from pip._vendor.packaging.requirements import InvalidRequirement, Requirement -from pip._internal.exceptions import InstallationError +from pip._internal.exceptions import ( + InstallationError, + InvalidPyProjectBuildRequires, + MissingPyProjectBuildRequires, +) -def _is_list_of_str(obj): - # type: (Any) -> bool - return ( - isinstance(obj, list) and - all(isinstance(item, str) for item in obj) - ) +def _is_list_of_str(obj: Any) -> bool: + return isinstance(obj, list) and all(isinstance(item, str) for item in obj) -def make_pyproject_path(unpacked_source_directory): - # type: (str) -> str - return os.path.join(unpacked_source_directory, 'pyproject.toml') +def make_pyproject_path(unpacked_source_directory: str) -> str: + return os.path.join(unpacked_source_directory, "pyproject.toml") -BuildSystemDetails = namedtuple('BuildSystemDetails', [ - 'requires', 'backend', 'check', 'backend_path' -]) +BuildSystemDetails = namedtuple( + "BuildSystemDetails", ["requires", "backend", "check", "backend_path"] +) def load_pyproject_toml( - use_pep517, # type: Optional[bool] - pyproject_toml, # type: str - setup_py, # type: str - req_name # type: str -): - # type: (...) -> Optional[BuildSystemDetails] + use_pep517: Optional[bool], pyproject_toml: str, setup_py: str, req_name: str +) -> Optional[BuildSystemDetails]: """Load the pyproject.toml file. Parameters: @@ -57,9 +53,15 @@ def load_pyproject_toml( has_pyproject = os.path.isfile(pyproject_toml) has_setup = os.path.isfile(setup_py) + if not has_pyproject and not has_setup: + raise InstallationError( + f"{req_name} does not appear to be a Python project: " + f"neither 'setup.py' nor 'pyproject.toml' found." + ) + if has_pyproject: with open(pyproject_toml, encoding="utf-8") as f: - pp_toml = toml.load(f) + pp_toml = tomli.loads(f.read()) build_system = pp_toml.get("build-system") else: build_system = None @@ -82,17 +84,26 @@ def load_pyproject_toml( raise InstallationError( "Disabling PEP 517 processing is invalid: " "project specifies a build backend of {} " - "in pyproject.toml".format( - build_system["build-backend"] - ) + "in pyproject.toml".format(build_system["build-backend"]) ) use_pep517 = True # If we haven't worked out whether to use PEP 517 yet, # and the user hasn't explicitly stated a preference, - # we do so if the project has a pyproject.toml file. + # we do so if the project has a pyproject.toml file + # or if we cannot import setuptools or wheels. + + # We fallback to PEP 517 when without setuptools or without the wheel package, + # so setuptools can be installed as a default build backend. 
+ # For more info see: + # https://discuss.python.org/t/pip-without-setuptools-could-the-experience-be-improved/11810/9 + # https://github.com/pypa/pip/issues/8559 elif use_pep517 is None: - use_pep517 = has_pyproject + use_pep517 = ( + has_pyproject + or not importlib.util.find_spec("setuptools") + or not importlib.util.find_spec("wheel") + ) # At this point, we know whether we're going to use PEP 517. assert use_pep517 is not None @@ -124,52 +135,37 @@ def load_pyproject_toml( # Ensure that the build-system section in pyproject.toml conforms # to PEP 518. - error_template = ( - "{package} has a pyproject.toml file that does not comply " - "with PEP 518: {reason}" - ) # Specifying the build-system table but not the requires key is invalid if "requires" not in build_system: - raise InstallationError( - error_template.format(package=req_name, reason=( - "it has a 'build-system' table but not " - "'build-system.requires' which is mandatory in the table" - )) - ) + raise MissingPyProjectBuildRequires(package=req_name) # Error out if requires is not a list of strings requires = build_system["requires"] if not _is_list_of_str(requires): - raise InstallationError(error_template.format( + raise InvalidPyProjectBuildRequires( package=req_name, - reason="'build-system.requires' is not a list of strings.", - )) + reason="It is not a list of strings.", + ) # Each requirement must be valid as per PEP 508 for requirement in requires: try: Requirement(requirement) - except InvalidRequirement: - raise InstallationError( - error_template.format( - package=req_name, - reason=( - "'build-system.requires' contains an invalid " - "requirement: {!r}".format(requirement) - ), - ) - ) + except InvalidRequirement as error: + raise InvalidPyProjectBuildRequires( + package=req_name, + reason=f"It contains an invalid requirement: {requirement!r}", + ) from error backend = build_system.get("build-backend") backend_path = build_system.get("backend-path", []) - check = [] # type: List[str] + check: List[str] = [] if backend is None: # If the user didn't specify a backend, we assume they want to use # the setuptools backend. But we can't be sure they have included - # a version of setuptools which supplies the backend, or wheel - # (which is needed by the backend) in their requirements. So we - # make a note to check that those requirements are present once + # a version of setuptools which supplies the backend. So we + # make a note to check that this requirement is present once # we have set up the environment. # This is quite a lot of work to check for a very specific case. But # the problem is, that case is potentially quite common - projects that @@ -178,6 +174,6 @@ def load_pyproject_toml( # tools themselves. The original PEP 518 code had a similar check (but # implemented in a different way). 
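# Condensed, the backend-selection fallback above amounts to the following.
# A minimal sketch that ignores the error paths handled earlier in this
# function; `has_pyproject` and `build_system` mirror its locals:
import importlib.util

def should_use_pep517(use_pep517, has_pyproject, build_system):
    if build_system and "build-backend" in build_system:
        return True  # an explicit backend always implies PEP 517
    if use_pep517 is not None:
        return bool(use_pep517)  # an explicit user/caller preference wins
    # Otherwise fall back to PEP 517 when pyproject.toml exists, or when the
    # legacy path is unavailable because setuptools or wheel cannot be found.
    return (
        has_pyproject
        or not importlib.util.find_spec("setuptools")
        or not importlib.util.find_spec("wheel")
    )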
backend = "setuptools.build_meta:__legacy__" - check = ["setuptools>=40.8.0", "wheel"] + check = ["setuptools>=40.8.0"] return BuildSystemDetails(requires, backend, check, backend_path) diff --git a/env/Lib/site-packages/pip/_internal/req/__init__.py b/env/Lib/site-packages/pip/_internal/req/__init__.py index 06f0a0823f136191bf804135ca795adb77bfaada..16de903a44cbfdf2f4dc40ee581059155fa1a9b3 100644 --- a/env/Lib/site-packages/pip/_internal/req/__init__.py +++ b/env/Lib/site-packages/pip/_internal/req/__init__.py @@ -1,6 +1,6 @@ import collections import logging -from typing import Iterator, List, Optional, Sequence, Tuple +from typing import Generator, List, Optional, Sequence, Tuple from pip._internal.utils.logging import indent_log @@ -9,44 +9,41 @@ from .req_install import InstallRequirement from .req_set import RequirementSet __all__ = [ - "RequirementSet", "InstallRequirement", - "parse_requirements", "install_given_reqs", + "RequirementSet", + "InstallRequirement", + "parse_requirements", + "install_given_reqs", ] logger = logging.getLogger(__name__) class InstallationResult: - def __init__(self, name): - # type: (str) -> None + def __init__(self, name: str) -> None: self.name = name - def __repr__(self): - # type: () -> str + def __repr__(self) -> str: return f"InstallationResult(name={self.name!r})" def _validate_requirements( - requirements, # type: List[InstallRequirement] -): - # type: (...) -> Iterator[Tuple[str, InstallRequirement]] + requirements: List[InstallRequirement], +) -> Generator[Tuple[str, InstallRequirement], None, None]: for req in requirements: assert req.name, f"invalid to-be-installed requirement: {req}" yield req.name, req def install_given_reqs( - requirements, # type: List[InstallRequirement] - install_options, # type: List[str] - global_options, # type: Sequence[str] - root, # type: Optional[str] - home, # type: Optional[str] - prefix, # type: Optional[str] - warn_script_location, # type: bool - use_user_site, # type: bool - pycompile, # type: bool -): - # type: (...) -> List[InstallationResult] + requirements: List[InstallRequirement], + global_options: Sequence[str], + root: Optional[str], + home: Optional[str], + prefix: Optional[str], + warn_script_location: bool, + use_user_site: bool, + pycompile: bool, +) -> List[InstallationResult]: """ Install everything in the given list. 
@@ -56,8 +53,8 @@ def install_given_reqs( if to_install: logger.info( - 'Installing collected packages: %s', - ', '.join(to_install.keys()), + "Installing collected packages: %s", + ", ".join(to_install.keys()), ) installed = [] @@ -65,17 +62,14 @@ def install_given_reqs( with indent_log(): for req_name, requirement in to_install.items(): if requirement.should_reinstall: - logger.info('Attempting uninstall: %s', req_name) + logger.info("Attempting uninstall: %s", req_name) with indent_log(): - uninstalled_pathset = requirement.uninstall( - auto_confirm=True - ) + uninstalled_pathset = requirement.uninstall(auto_confirm=True) else: uninstalled_pathset = None try: requirement.install( - install_options, global_options, root=root, home=home, diff --git a/env/Lib/site-packages/pip/_internal/req/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/req/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 0d885a37520c3b3217e78fc6cb6234c6c0e88a2c..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/req/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/req/__pycache__/constructors.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/req/__pycache__/constructors.cpython-39.pyc deleted file mode 100644 index baafd90039bce100bf9133d5ac821014652d809c..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/req/__pycache__/constructors.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/req/__pycache__/req_file.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/req/__pycache__/req_file.cpython-39.pyc deleted file mode 100644 index a25b0ce6f0cf68355884b766e8202b2cec5915a2..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/req/__pycache__/req_file.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/req/__pycache__/req_install.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/req/__pycache__/req_install.cpython-39.pyc deleted file mode 100644 index 20a582b9dedfd7eb8a1a1618ff02c95261c2c24d..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/req/__pycache__/req_install.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/req/__pycache__/req_set.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/req/__pycache__/req_set.cpython-39.pyc deleted file mode 100644 index ae2f115ee96cf7a029041a3522402ee344b888d5..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/req/__pycache__/req_set.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/req/__pycache__/req_tracker.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/req/__pycache__/req_tracker.cpython-39.pyc deleted file mode 100644 index 62c7068c5d89ef9d8e8bd8db579753e39128cd2e..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/req/__pycache__/req_tracker.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/req/__pycache__/req_uninstall.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/req/__pycache__/req_uninstall.cpython-39.pyc deleted file mode 100644 index 58ad9d78cad1ff38a3ef66aa1765f5687f9cc177..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/req/__pycache__/req_uninstall.cpython-39.pyc and /dev/null differ diff --git 
a/env/Lib/site-packages/pip/_internal/req/constructors.py b/env/Lib/site-packages/pip/_internal/req/constructors.py index 3f9e7dd7734e1dbc905ef1e176a93dc69ece8fe0..c5ca2d85d5176c65a2e90000b0d67390573120a6 100644 --- a/env/Lib/site-packages/pip/_internal/req/constructors.py +++ b/env/Lib/site-packages/pip/_internal/req/constructors.py @@ -11,37 +11,36 @@ InstallRequirement. import logging import os import re -from typing import Any, Dict, Optional, Set, Tuple, Union +from typing import Dict, List, Optional, Set, Tuple, Union from pip._vendor.packaging.markers import Marker from pip._vendor.packaging.requirements import InvalidRequirement, Requirement from pip._vendor.packaging.specifiers import Specifier -from pip._vendor.pkg_resources import RequirementParseError, parse_requirements from pip._internal.exceptions import InstallationError from pip._internal.models.index import PyPI, TestPyPI from pip._internal.models.link import Link from pip._internal.models.wheel import Wheel -from pip._internal.pyproject import make_pyproject_path from pip._internal.req.req_file import ParsedRequirement from pip._internal.req.req_install import InstallRequirement from pip._internal.utils.filetypes import is_archive_file from pip._internal.utils.misc import is_installable_dir +from pip._internal.utils.packaging import get_requirement from pip._internal.utils.urls import path_to_url from pip._internal.vcs import is_url, vcs __all__ = [ - "install_req_from_editable", "install_req_from_line", - "parse_editable" + "install_req_from_editable", + "install_req_from_line", + "parse_editable", ] logger = logging.getLogger(__name__) operators = Specifier._operators.keys() -def _strip_extras(path): - # type: (str) -> Tuple[str, Optional[str]] - m = re.match(r'^(.+)(\[[^\]]+\])$', path) +def _strip_extras(path: str) -> Tuple[str, Optional[str]]: + m = re.match(r"^(.+)(\[[^\]]+\])$", path) extras = None if m: path_no_extras = m.group(1) @@ -52,15 +51,13 @@ def _strip_extras(path): return path_no_extras, extras -def convert_extras(extras): - # type: (Optional[str]) -> Set[str] +def convert_extras(extras: Optional[str]) -> Set[str]: if not extras: return set() - return Requirement("placeholder" + extras.lower()).extras + return get_requirement("placeholder" + extras.lower()).extras -def parse_editable(editable_req): - # type: (str) -> Tuple[Optional[str], str, Set[str]] +def parse_editable(editable_req: str) -> Tuple[Optional[str], str, Set[str]]: """Parses an editable requirement into: - a requirement name - a URL @@ -77,39 +74,23 @@ def parse_editable(editable_req): url_no_extras, extras = _strip_extras(url) if os.path.isdir(url_no_extras): - setup_py = os.path.join(url_no_extras, 'setup.py') - setup_cfg = os.path.join(url_no_extras, 'setup.cfg') - if not os.path.exists(setup_py) and not os.path.exists(setup_cfg): - msg = ( - 'File "setup.py" or "setup.cfg" not found. 
Directory cannot be ' - 'installed in editable mode: {}' - .format(os.path.abspath(url_no_extras)) - ) - pyproject_path = make_pyproject_path(url_no_extras) - if os.path.isfile(pyproject_path): - msg += ( - '\n(A "pyproject.toml" file was found, but editable ' - 'mode currently requires a setuptools-based build.)' - ) - raise InstallationError(msg) - # Treating it as code that has already been checked out url_no_extras = path_to_url(url_no_extras) - if url_no_extras.lower().startswith('file:'): + if url_no_extras.lower().startswith("file:"): package_name = Link(url_no_extras).egg_fragment if extras: return ( package_name, url_no_extras, - Requirement("placeholder" + extras.lower()).extras, + get_requirement("placeholder" + extras.lower()).extras, ) else: return package_name, url_no_extras, set() for version_control in vcs: - if url.lower().startswith(f'{version_control}:'): - url = f'{version_control}+{url}' + if url.lower().startswith(f"{version_control}:"): + url = f"{version_control}+{url}" break link = Link(url) @@ -117,9 +98,9 @@ def parse_editable(editable_req): if not link.is_vcs: backends = ", ".join(vcs.all_schemes) raise InstallationError( - f'{editable_req} is not a valid editable requirement. ' - f'It should either be a path to a local project or a VCS URL ' - f'(beginning with {backends}).' + f"{editable_req} is not a valid editable requirement. " + f"It should either be a path to a local project or a VCS URL " + f"(beginning with {backends})." ) package_name = link.egg_fragment @@ -131,44 +112,66 @@ def parse_editable(editable_req): return package_name, url, set() -def deduce_helpful_msg(req): - # type: (str) -> str +def check_first_requirement_in_file(filename: str) -> None: + """Check if file is parsable as a requirements file. + + This is heavily based on ``pkg_resources.parse_requirements``, but + simplified to just check the first meaningful line. + + :raises InvalidRequirement: If the first meaningful line cannot be parsed + as a requirement. + """ + with open(filename, encoding="utf-8", errors="ignore") as f: + # Create a steppable iterator, so we can handle \-continuations. + lines = ( + line + for line in (line.strip() for line in f) + if line and not line.startswith("#") # Skip blank lines/comments. + ) + + for line in lines: + # Drop comments -- a hash without a space may be in a URL. + if " #" in line: + line = line[: line.find(" #")] + # If there is a line continuation, drop it, and append the next line. + if line.endswith("\\"): + line = line[:-2].strip() + next(lines, "") + Requirement(line) + return + + +def deduce_helpful_msg(req: str) -> str: """Returns helpful msg in case requirements file does not exist, or cannot be parsed. :param req: Requirements file path """ - msg = "" - if os.path.exists(req): - msg = " The path does exist. " - # Try to parse and check if it is a requirements file. - try: - with open(req) as fp: - # parse first line only - next(parse_requirements(fp.read())) - msg += ( - "The argument you provided " - "({}) appears to be a" - " requirements file. If that is the" - " case, use the '-r' flag to install" - " the packages specified within it." - ).format(req) - except RequirementParseError: - logger.debug( - "Cannot parse '%s' as requirements file", req, exc_info=True - ) + if not os.path.exists(req): + return f" File '{req}' does not exist." + msg = " The path does exist. " + # Try to parse and check if it is a requirements file. 
+ try: + check_first_requirement_in_file(req) + except InvalidRequirement: + logger.debug("Cannot parse '%s' as requirements file", req) else: - msg += f" File '{req}' does not exist." + msg += ( + f"The argument you provided " + f"({req}) appears to be a" + f" requirements file. If that is the" + f" case, use the '-r' flag to install" + f" the packages specified within it." + ) return msg class RequirementParts: def __init__( - self, - requirement, # type: Optional[Requirement] - link, # type: Optional[Link] - markers, # type: Optional[Marker] - extras, # type: Set[str] + self, + requirement: Optional[Requirement], + link: Optional[Link], + markers: Optional[Marker], + extras: Set[str], ): self.requirement = requirement self.link = link @@ -176,13 +179,12 @@ class RequirementParts: self.extras = extras -def parse_req_from_editable(editable_req): - # type: (str) -> RequirementParts +def parse_req_from_editable(editable_req: str) -> RequirementParts: name, url, extras_override = parse_editable(editable_req) if name is not None: try: - req = Requirement(name) # type: Optional[Requirement] + req: Optional[Requirement] = Requirement(name) except InvalidRequirement: raise InstallationError(f"Invalid requirement: '{name}'") else: @@ -197,16 +199,18 @@ def parse_req_from_editable(editable_req): def install_req_from_editable( - editable_req, # type: str - comes_from=None, # type: Optional[Union[InstallRequirement, str]] - use_pep517=None, # type: Optional[bool] - isolated=False, # type: bool - options=None, # type: Optional[Dict[str, Any]] - constraint=False, # type: bool - user_supplied=False, # type: bool -): - # type: (...) -> InstallRequirement - + editable_req: str, + comes_from: Optional[Union[InstallRequirement, str]] = None, + *, + use_pep517: Optional[bool] = None, + isolated: bool = False, + global_options: Optional[List[str]] = None, + hash_options: Optional[Dict[str, List[str]]] = None, + constraint: bool = False, + user_supplied: bool = False, + permit_editable_wheels: bool = False, + config_settings: Optional[Dict[str, Union[str, List[str]]]] = None, +) -> InstallRequirement: parts = parse_req_from_editable(editable_req) return InstallRequirement( @@ -214,19 +218,19 @@ def install_req_from_editable( comes_from=comes_from, user_supplied=user_supplied, editable=True, + permit_editable_wheels=permit_editable_wheels, link=parts.link, constraint=constraint, use_pep517=use_pep517, isolated=isolated, - install_options=options.get("install_options", []) if options else [], - global_options=options.get("global_options", []) if options else [], - hash_options=options.get("hashes", {}) if options else {}, + global_options=global_options, + hash_options=hash_options, + config_settings=config_settings, extras=parts.extras, ) -def _looks_like_path(name): - # type: (str) -> bool +def _looks_like_path(name: str) -> bool: """Checks whether the string "looks like" a path on the filesystem. This does not check whether the target actually exists, only judge from the @@ -245,11 +249,10 @@ def _looks_like_path(name): return False -def _get_url_from_path(path, name): - # type: (str, str) -> Optional[str] +def _get_url_from_path(path: str, name: str) -> Optional[str]: """ - First, it checks whether a provided path is an installable directory - (e.g. it has a setup.py). If it is, returns the path. + First, it checks whether a provided path is an installable directory. If it + is, returns the path. If false, check if the path is an archive file (such as a .whl). The function checks if the path is a file. 
If false, if the path has @@ -258,6 +261,8 @@ def _get_url_from_path(path, name): if _looks_like_path(name) and os.path.isdir(path): if is_installable_dir(path): return path_to_url(path) + # TODO: The is_installable_dir test here might not be necessary + # now that it is done in load_pyproject_toml too. raise InstallationError( f"Directory {name!r} is not installable. Neither 'setup.py' " "nor 'pyproject.toml' found." @@ -266,25 +271,23 @@ def _get_url_from_path(path, name): return None if os.path.isfile(path): return path_to_url(path) - urlreq_parts = name.split('@', 1) + urlreq_parts = name.split("@", 1) if len(urlreq_parts) >= 2 and not _looks_like_path(urlreq_parts[0]): # If the path contains '@' and the part before it does not look # like a path, try to treat it as a PEP 440 URL req instead. return None logger.warning( - 'Requirement %r looks like a filename, but the ' - 'file does not exist', - name + "Requirement %r looks like a filename, but the file does not exist", + name, ) return path_to_url(path) -def parse_req_from_line(name, line_source): - # type: (str, Optional[str]) -> RequirementParts +def parse_req_from_line(name: str, line_source: Optional[str]) -> RequirementParts: if is_url(name): - marker_sep = '; ' + marker_sep = "; " else: - marker_sep = ';' + marker_sep = ";" if marker_sep in name: name, markers_as_string = name.split(marker_sep, 1) markers_as_string = markers_as_string.strip() @@ -311,9 +314,8 @@ def parse_req_from_line(name, line_source): # it's a local file, dir, or url if link: # Handle relative file URLs - if link.scheme == 'file' and re.search(r'\.\./', link.url): - link = Link( - path_to_url(os.path.normpath(os.path.abspath(link.path)))) + if link.scheme == "file" and re.search(r"\.\./", link.url): + link = Link(path_to_url(os.path.normpath(os.path.abspath(link.path)))) # wheel file if link.is_wheel: wheel = Wheel(link.filename) # can raise InvalidWheelFilename @@ -329,29 +331,27 @@ def parse_req_from_line(name, line_source): extras = convert_extras(extras_as_string) - def with_source(text): - # type: (str) -> str + def with_source(text: str) -> str: if not line_source: return text - return f'{text} (from {line_source})' + return f"{text} (from {line_source})" def _parse_req_string(req_as_string: str) -> Requirement: try: - req = Requirement(req_as_string) + req = get_requirement(req_as_string) except InvalidRequirement: if os.path.sep in req_as_string: add_msg = "It looks like a path." add_msg += deduce_helpful_msg(req_as_string) - elif ('=' in req_as_string and - not any(op in req_as_string for op in operators)): + elif "=" in req_as_string and not any( + op in req_as_string for op in operators + ): add_msg = "= is not a valid operator. Did you mean == ?" else: - add_msg = '' - msg = with_source( - f'Invalid requirement: {req_as_string!r}' - ) + add_msg = "" + msg = with_source(f"Invalid requirement: {req_as_string!r}") if add_msg: - msg += f'\nHint: {add_msg}' + msg += f"\nHint: {add_msg}" raise InstallationError(msg) else: # Deprecate extras after specifiers: "name>=1.0[extras]" @@ -360,13 +360,13 @@ def parse_req_from_line(name, line_source): # RequirementParts for spec in req.specifier: spec_str = str(spec) - if spec_str.endswith(']'): + if spec_str.endswith("]"): msg = f"Extras after version '{spec_str}'." 
raise InstallationError(msg) return req if req_as_string is not None: - req = _parse_req_string(req_as_string) # type: Optional[Requirement] + req: Optional[Requirement] = _parse_req_string(req_as_string) else: req = None @@ -374,16 +374,18 @@ def parse_req_from_line(name, line_source): def install_req_from_line( - name, # type: str - comes_from=None, # type: Optional[Union[str, InstallRequirement]] - use_pep517=None, # type: Optional[bool] - isolated=False, # type: bool - options=None, # type: Optional[Dict[str, Any]] - constraint=False, # type: bool - line_source=None, # type: Optional[str] - user_supplied=False, # type: bool -): - # type: (...) -> InstallRequirement + name: str, + comes_from: Optional[Union[str, InstallRequirement]] = None, + *, + use_pep517: Optional[bool] = None, + isolated: bool = False, + global_options: Optional[List[str]] = None, + hash_options: Optional[Dict[str, List[str]]] = None, + constraint: bool = False, + line_source: Optional[str] = None, + user_supplied: bool = False, + config_settings: Optional[Dict[str, Union[str, List[str]]]] = None, +) -> InstallRequirement: """Creates an InstallRequirement from a name, which might be a requirement, directory containing 'setup.py', filename, or URL. @@ -393,11 +395,15 @@ def install_req_from_line( parts = parse_req_from_line(name, line_source) return InstallRequirement( - parts.requirement, comes_from, link=parts.link, markers=parts.markers, - use_pep517=use_pep517, isolated=isolated, - install_options=options.get("install_options", []) if options else [], - global_options=options.get("global_options", []) if options else [], - hash_options=options.get("hashes", {}) if options else {}, + parts.requirement, + comes_from, + link=parts.link, + markers=parts.markers, + use_pep517=use_pep517, + isolated=isolated, + global_options=global_options, + hash_options=hash_options, + config_settings=config_settings, constraint=constraint, extras=parts.extras, user_supplied=user_supplied, @@ -405,15 +411,14 @@ def install_req_from_line( def install_req_from_req_string( - req_string, # type: str - comes_from=None, # type: Optional[InstallRequirement] - isolated=False, # type: bool - use_pep517=None, # type: Optional[bool] - user_supplied=False, # type: bool -): - # type: (...) -> InstallRequirement + req_string: str, + comes_from: Optional[InstallRequirement] = None, + isolated: bool = False, + use_pep517: Optional[bool] = None, + user_supplied: bool = False, +) -> InstallRequirement: try: - req = Requirement(req_string) + req = get_requirement(req_string) except InvalidRequirement: raise InstallationError(f"Invalid requirement: '{req_string}'") @@ -421,8 +426,12 @@ def install_req_from_req_string( PyPI.file_storage_domain, TestPyPI.file_storage_domain, ] - if (req.url and comes_from and comes_from.link and - comes_from.link.netloc in domains_not_allowed): + if ( + req.url + and comes_from + and comes_from.link + and comes_from.link.netloc in domains_not_allowed + ): # Explicitly disallow pypi packages that depend on external urls raise InstallationError( "Packages installed from PyPI cannot depend on packages " @@ -440,12 +449,12 @@ def install_req_from_req_string( def install_req_from_parsed_requirement( - parsed_req, # type: ParsedRequirement - isolated=False, # type: bool - use_pep517=None, # type: Optional[bool] - user_supplied=False, # type: bool -): - # type: (...) 
-> InstallRequirement + parsed_req: ParsedRequirement, + isolated: bool = False, + use_pep517: Optional[bool] = None, + user_supplied: bool = False, + config_settings: Optional[Dict[str, Union[str, List[str]]]] = None, +) -> InstallRequirement: if parsed_req.is_editable: req = install_req_from_editable( parsed_req.requirement, @@ -454,6 +463,7 @@ def install_req_from_parsed_requirement( constraint=parsed_req.constraint, isolated=isolated, user_supplied=user_supplied, + config_settings=config_settings, ) else: @@ -462,16 +472,25 @@ def install_req_from_parsed_requirement( comes_from=parsed_req.comes_from, use_pep517=use_pep517, isolated=isolated, - options=parsed_req.options, + global_options=( + parsed_req.options.get("global_options", []) + if parsed_req.options + else [] + ), + hash_options=( + parsed_req.options.get("hashes", {}) if parsed_req.options else {} + ), constraint=parsed_req.constraint, line_source=parsed_req.line_source, user_supplied=user_supplied, + config_settings=config_settings, ) return req -def install_req_from_link_and_ireq(link, ireq): - # type: (Link, InstallRequirement) -> InstallRequirement +def install_req_from_link_and_ireq( + link: Link, ireq: InstallRequirement +) -> InstallRequirement: return InstallRequirement( req=ireq.req, comes_from=ireq.comes_from, @@ -480,7 +499,8 @@ def install_req_from_link_and_ireq(link, ireq): markers=ireq.markers, use_pep517=ireq.use_pep517, isolated=ireq.isolated, - install_options=ireq.install_options, global_options=ireq.global_options, hash_options=ireq.hash_options, + config_settings=ireq.config_settings, + user_supplied=ireq.user_supplied, ) diff --git a/env/Lib/site-packages/pip/_internal/req/req_file.py b/env/Lib/site-packages/pip/_internal/req/req_file.py index 080c128199dbcd44ffe941fe4afe1c4c7958e963..f717c1ccc79f7581f1293b3fcf1a0764def7a84a 100644 --- a/env/Lib/site-packages/pip/_internal/req/req_file.py +++ b/env/Lib/site-packages/pip/_internal/req/req_file.py @@ -2,13 +2,24 @@ Requirements file parsing """ +import logging import optparse import os import re import shlex import urllib.parse from optparse import Values -from typing import TYPE_CHECKING, Any, Callable, Dict, Iterator, List, Optional, Tuple +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Generator, + Iterable, + List, + Optional, + Tuple, +) from pip._internal.cli import cmdoptions from pip._internal.exceptions import InstallationError, RequirementsFileParseError @@ -16,7 +27,7 @@ from pip._internal.models.search_scope import SearchScope from pip._internal.network.session import PipSession from pip._internal.network.utils import raise_for_status from pip._internal.utils.encoding import auto_decode -from pip._internal.utils.urls import get_url_scheme, url_to_path +from pip._internal.utils.urls import get_url_scheme if TYPE_CHECKING: # NoReturn introduced in 3.6.2; imported only for type checking to maintain @@ -25,22 +36,22 @@ if TYPE_CHECKING: from pip._internal.index.package_finder import PackageFinder -__all__ = ['parse_requirements'] +__all__ = ["parse_requirements"] -ReqFileLines = Iterator[Tuple[int, str]] +ReqFileLines = Iterable[Tuple[int, str]] LineParser = Callable[[str], Tuple[str, Values]] -SCHEME_RE = re.compile(r'^(http|https|file):', re.I) -COMMENT_RE = re.compile(r'(^|\s+)#.*$') +SCHEME_RE = re.compile(r"^(http|https|file):", re.I) +COMMENT_RE = re.compile(r"(^|\s+)#.*$") # Matches environment variable-style values in '${MY_VARIABLE_1}' with the # variable name consisting of only uppercase letters, digits or the '_' # 
(underscore). This follows the POSIX standard defined in IEEE Std 1003.1, # 2013 Edition. -ENV_VAR_RE = re.compile(r'(?P<var>\$\{(?P<name>[A-Z0-9_]+)\})') +ENV_VAR_RE = re.compile(r"(?P<var>\$\{(?P<name>[A-Z0-9_]+)\})") -SUPPORTED_OPTIONS = [ +SUPPORTED_OPTIONS: List[Callable[..., optparse.Option]] = [ cmdoptions.index_url, cmdoptions.extra_index_url, cmdoptions.no_index, @@ -55,30 +66,31 @@ SUPPORTED_OPTIONS = [ cmdoptions.pre, cmdoptions.trusted_host, cmdoptions.use_new_feature, -] # type: List[Callable[..., optparse.Option]] +] # options to be passed to requirements -SUPPORTED_OPTIONS_REQ = [ - cmdoptions.install_options, +SUPPORTED_OPTIONS_REQ: List[Callable[..., optparse.Option]] = [ cmdoptions.global_options, cmdoptions.hash, -] # type: List[Callable[..., optparse.Option]] + cmdoptions.config_settings, +] # the 'dest' string values SUPPORTED_OPTIONS_REQ_DEST = [str(o().dest) for o in SUPPORTED_OPTIONS_REQ] +logger = logging.getLogger(__name__) + class ParsedRequirement: def __init__( self, - requirement, # type:str - is_editable, # type: bool - comes_from, # type: str - constraint, # type: bool - options=None, # type: Optional[Dict[str, Any]] - line_source=None, # type: Optional[str] - ): - # type: (...) -> None + requirement: str, + is_editable: bool, + comes_from: str, + constraint: bool, + options: Optional[Dict[str, Any]] = None, + line_source: Optional[str] = None, + ) -> None: self.requirement = requirement self.is_editable = is_editable self.comes_from = comes_from @@ -90,13 +102,12 @@ class ParsedRequirement: class ParsedLine: def __init__( self, - filename, # type: str - lineno, # type: int - args, # type: str - opts, # type: Values - constraint, # type: bool - ): - # type: (...) -> None + filename: str, + lineno: int, + args: str, + opts: Values, + constraint: bool, + ) -> None: self.filename = filename self.lineno = lineno self.opts = opts @@ -116,13 +127,12 @@ class ParsedLine: def parse_requirements( - filename, # type: str - session, # type: PipSession - finder=None, # type: Optional[PackageFinder] - options=None, # type: Optional[optparse.Values] - constraint=False, # type: bool -): - # type: (...) -> Iterator[ParsedRequirement] + filename: str, + session: PipSession, + finder: Optional["PackageFinder"] = None, + options: Optional[optparse.Values] = None, + constraint: bool = False, +) -> Generator[ParsedRequirement, None, None]: """Parse a requirements file and yield ParsedRequirement instances. :param filename: Path or url of requirements file. @@ -137,22 +147,18 @@ def parse_requirements( for parsed_line in parser.parse(filename, constraint): parsed_req = handle_line( - parsed_line, - options=options, - finder=finder, - session=session + parsed_line, options=options, finder=finder, session=session ) if parsed_req is not None: yield parsed_req -def preprocess(content): - # type: (str) -> ReqFileLines +def preprocess(content: str) -> ReqFileLines: """Split, filter, and join lines, and return a line iterator :param content: the content of the requirements file """ - lines_enum = enumerate(content.splitlines(), start=1) # type: ReqFileLines + lines_enum: ReqFileLines = enumerate(content.splitlines(), start=1) lines_enum = join_lines(lines_enum) lines_enum = ignore_comments(lines_enum) lines_enum = expand_env_variables(lines_enum) @@ -160,14 +166,14 @@ def preprocess(content): def handle_requirement_line( - line, # type: ParsedLine - options=None, # type: Optional[optparse.Values] -): - # type: (...) 
-> ParsedRequirement - + line: ParsedLine, + options: Optional[optparse.Values] = None, +) -> ParsedRequirement: # preserve for the nested code path - line_comes_from = '{} {} (line {})'.format( - '-c' if line.constraint else '-r', line.filename, line.lineno, + line_comes_from = "{} {} (line {})".format( + "-c" if line.constraint else "-r", + line.filename, + line.lineno, ) assert line.is_requirement @@ -182,17 +188,13 @@ def handle_requirement_line( constraint=line.constraint, ) else: - if options: - # Disable wheels if the user has specified build options - cmdoptions.check_install_build_global(options, line.opts) - # get the options that apply to requirements req_options = {} for dest in SUPPORTED_OPTIONS_REQ_DEST: if dest in line.opts.__dict__ and line.opts.__dict__[dest]: req_options[dest] = line.opts.__dict__[dest] - line_source = f'line {line.lineno} of {line.filename}' + line_source = f"line {line.lineno} of {line.filename}" return ParsedRequirement( requirement=line.requirement, is_editable=line.is_editable, @@ -204,14 +206,19 @@ def handle_requirement_line( def handle_option_line( - opts, # type: Values - filename, # type: str - lineno, # type: int - finder=None, # type: Optional[PackageFinder] - options=None, # type: Optional[optparse.Values] - session=None, # type: Optional[PipSession] -): - # type: (...) -> None + opts: Values, + filename: str, + lineno: int, + finder: Optional["PackageFinder"] = None, + options: Optional[optparse.Values] = None, + session: Optional[PipSession] = None, +) -> None: + if opts.hashes: + logger.warning( + "%s line %s has --hash but no requirement, and will be ignored.", + filename, + lineno, + ) if options: # percolate options upward @@ -219,19 +226,20 @@ def handle_option_line( options.require_hashes = opts.require_hashes if opts.features_enabled: options.features_enabled.extend( - f for f in opts.features_enabled - if f not in options.features_enabled + f for f in opts.features_enabled if f not in options.features_enabled ) # set finder options if finder: find_links = finder.find_links index_urls = finder.index_urls - if opts.index_url: - index_urls = [opts.index_url] + no_index = finder.search_scope.no_index if opts.no_index is True: + no_index = True index_urls = [] - if opts.extra_index_urls: + if opts.index_url and not no_index: + index_urls = [opts.index_url] + if opts.extra_index_urls and not no_index: index_urls.extend(opts.extra_index_urls) if opts.find_links: # FIXME: it would be nice to keep track of the source @@ -251,6 +259,7 @@ def handle_option_line( search_scope = SearchScope( find_links=find_links, index_urls=index_urls, + no_index=no_index, ) finder.search_scope = search_scope @@ -262,17 +271,16 @@ def handle_option_line( if session: for host in opts.trusted_hosts or []: - source = f'line {lineno} of {filename}' + source = f"line {lineno} of {filename}" session.add_trusted_host(host, source=source) def handle_line( - line, # type: ParsedLine - options=None, # type: Optional[optparse.Values] - finder=None, # type: Optional[PackageFinder] - session=None, # type: Optional[PipSession] -): - # type: (...) -> Optional[ParsedRequirement] + line: ParsedLine, + options: Optional[optparse.Values] = None, + finder: Optional["PackageFinder"] = None, + session: Optional[PipSession] = None, +) -> Optional[ParsedRequirement]: """Handle a single parsed requirements line; This can result in creating/yielding requirements, or updating the finder. 
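The ``handle_option_line`` hunk above changes how per-line index options combine: once any line sets ``--no-index``, later ``--index-url`` and ``--extra-index-url`` values are ignored instead of silently re-enabling the index. A rough standalone sketch of that precedence rule (the ``LineOpts`` dataclass is a stand-in for the optparse ``Values`` object, not pip's API):

from dataclasses import dataclass, field
from typing import List, Optional, Tuple

@dataclass
class LineOpts:
    # Stand-in for the per-line options pip parses from a requirements file.
    no_index: bool = False
    index_url: Optional[str] = None
    extra_index_urls: List[str] = field(default_factory=list)

def apply_index_options(
    index_urls: List[str], no_index: bool, opts: LineOpts
) -> Tuple[List[str], bool]:
    # --no-index clears the URL list and wins over any --index-url /
    # --extra-index-url seen on this or a later line, as in the hunk above.
    if opts.no_index:
        no_index = True
        index_urls = []
    if opts.index_url and not no_index:
        index_urls = [opts.index_url]
    if opts.extra_index_urls and not no_index:
        index_urls = index_urls + opts.extra_index_urls
    return index_urls, no_index

urls, off = apply_index_options(["https://pypi.org/simple"], False, LineOpts(no_index=True))
urls, off = apply_index_options(urls, off, LineOpts(index_url="https://example.com/simple"))
assert urls == [] and off  # --no-index already won
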
@@ -314,25 +322,24 @@ def handle_line( class RequirementsFileParser: def __init__( self, - session, # type: PipSession - line_parser, # type: LineParser - ): - # type: (...) -> None + session: PipSession, + line_parser: LineParser, + ) -> None: self._session = session self._line_parser = line_parser - def parse(self, filename, constraint): - # type: (str, bool) -> Iterator[ParsedLine] - """Parse a given file, yielding parsed lines. - """ + def parse( + self, filename: str, constraint: bool + ) -> Generator[ParsedLine, None, None]: + """Parse a given file, yielding parsed lines.""" yield from self._parse_and_recurse(filename, constraint) - def _parse_and_recurse(self, filename, constraint): - # type: (str, bool) -> Iterator[ParsedLine] + def _parse_and_recurse( + self, filename: str, constraint: bool + ) -> Generator[ParsedLine, None, None]: for line in self._parse_file(filename, constraint): - if ( - not line.is_requirement and - (line.opts.requirements or line.opts.constraints) + if not line.is_requirement and ( + line.opts.requirements or line.opts.constraints ): # parse a nested requirements file if line.opts.requirements: @@ -350,15 +357,17 @@ class RequirementsFileParser: elif not SCHEME_RE.search(req_path): # do a join so relative paths work req_path = os.path.join( - os.path.dirname(filename), req_path, + os.path.dirname(filename), + req_path, ) yield from self._parse_and_recurse(req_path, nested_constraint) else: yield line - def _parse_file(self, filename, constraint): - # type: (str, bool) -> Iterator[ParsedLine] + def _parse_file( + self, filename: str, constraint: bool + ) -> Generator[ParsedLine, None, None]: _, content = get_file_content(filename, self._session) lines_enum = preprocess(content) @@ -368,7 +377,7 @@ class RequirementsFileParser: args_str, opts = self._line_parser(line) except OptionParsingError as e: # add offending line - msg = f'Invalid requirement: {line}\n{e.msg}' + msg = f"Invalid requirement: {line}\n{e.msg}" raise RequirementsFileParseError(msg) yield ParsedLine( @@ -380,10 +389,8 @@ class RequirementsFileParser: ) -def get_line_parser(finder): - # type: (Optional[PackageFinder]) -> LineParser - def parse_line(line): - # type: (str) -> Tuple[str, Values] +def get_line_parser(finder: Optional["PackageFinder"]) -> LineParser: + def parse_line(line: str) -> Tuple[str, Values]: # Build new parser for each line since it accumulates appendable # options. parser = build_parser() @@ -394,39 +401,41 @@ def get_line_parser(finder): args_str, options_str = break_args_options(line) - opts, _ = parser.parse_args(shlex.split(options_str), defaults) + try: + options = shlex.split(options_str) + except ValueError as e: + raise OptionParsingError(f"Could not split options: {options_str}") from e + + opts, _ = parser.parse_args(options, defaults) return args_str, opts return parse_line -def break_args_options(line): - # type: (str) -> Tuple[str, str] +def break_args_options(line: str) -> Tuple[str, str]: """Break up the line into an args and options string. We only want to shlex (and then optparse) the options, not the args. args can contain markers which are corrupted by shlex. 
""" - tokens = line.split(' ') + tokens = line.split(" ") args = [] options = tokens[:] for token in tokens: - if token.startswith('-') or token.startswith('--'): + if token.startswith("-") or token.startswith("--"): break else: args.append(token) options.pop(0) - return ' '.join(args), ' '.join(options) + return " ".join(args), " ".join(options) class OptionParsingError(Exception): - def __init__(self, msg): - # type: (str) -> None + def __init__(self, msg: str) -> None: self.msg = msg -def build_parser(): - # type: () -> optparse.OptionParser +def build_parser() -> optparse.OptionParser: """ Return a parser for parsing requirement lines """ @@ -439,9 +448,9 @@ def build_parser(): # By default optparse sys.exits on parsing errors. We want to wrap # that in our own exception. - def parser_exit(self, msg): - # type: (Any, str) -> NoReturn + def parser_exit(self: Any, msg: str) -> "NoReturn": raise OptionParsingError(msg) + # NOTE: mypy disallows assigning to a method # https://github.com/python/mypy/issues/2427 parser.exit = parser_exit # type: ignore @@ -449,52 +458,49 @@ def build_parser(): return parser -def join_lines(lines_enum): - # type: (ReqFileLines) -> ReqFileLines +def join_lines(lines_enum: ReqFileLines) -> ReqFileLines: """Joins a line ending in '\' with the previous line (except when following comments). The joined line takes on the index of the first line. """ primary_line_number = None - new_line = [] # type: List[str] + new_line: List[str] = [] for line_number, line in lines_enum: - if not line.endswith('\\') or COMMENT_RE.match(line): + if not line.endswith("\\") or COMMENT_RE.match(line): if COMMENT_RE.match(line): # this ensures comments are always matched later - line = ' ' + line + line = " " + line if new_line: new_line.append(line) assert primary_line_number is not None - yield primary_line_number, ''.join(new_line) + yield primary_line_number, "".join(new_line) new_line = [] else: yield line_number, line else: if not new_line: primary_line_number = line_number - new_line.append(line.strip('\\')) + new_line.append(line.strip("\\")) # last line contains \ if new_line: assert primary_line_number is not None - yield primary_line_number, ''.join(new_line) + yield primary_line_number, "".join(new_line) # TODO: handle space after '\'. -def ignore_comments(lines_enum): - # type: (ReqFileLines) -> ReqFileLines +def ignore_comments(lines_enum: ReqFileLines) -> ReqFileLines: """ Strips comments and filter empty lines. """ for line_number, line in lines_enum: - line = COMMENT_RE.sub('', line) + line = COMMENT_RE.sub("", line) line = line.strip() if line: yield line_number, line -def expand_env_variables(lines_enum): - # type: (ReqFileLines) -> ReqFileLines +def expand_env_variables(lines_enum: ReqFileLines) -> ReqFileLines: """Replace all environment variables that can be retrieved via `os.getenv`. The only allowed format for environment variables defined in the @@ -521,8 +527,7 @@ def expand_env_variables(lines_enum): yield line_number, line -def get_file_content(url, session): - # type: (str, PipSession) -> Tuple[str, str] +def get_file_content(url: str, session: PipSession) -> Tuple[str, str]: """Gets the content of a file; it may be a filename, file: URL, or http: URL. Returns (location, content). Content is unicode. Respects # -*- coding: declarations on the retrieved files. 
@@ -532,20 +537,16 @@ def get_file_content(url, session): """ scheme = get_url_scheme(url) - if scheme in ['http', 'https']: - # FIXME: catch some errors + # Pip has special support for file:// URLs (LocalFSAdapter). + if scheme in ["http", "https", "file"]: resp = session.get(url) raise_for_status(resp) return resp.url, resp.text - elif scheme == 'file': - url = url_to_path(url) - + # Assume this is a bare path. try: - with open(url, 'rb') as f: + with open(url, "rb") as f: content = auto_decode(f.read()) except OSError as exc: - raise InstallationError( - f'Could not open requirements file: {exc}' - ) + raise InstallationError(f"Could not open requirements file: {exc}") return url, content diff --git a/env/Lib/site-packages/pip/_internal/req/req_install.py b/env/Lib/site-packages/pip/_internal/req/req_install.py index f4d6251412b3e4630e7551879057c6b650b88dd0..1f479713a94495ca72ceeef4806d1c07223284fe 100644 --- a/env/Lib/site-packages/pip/_internal/req/req_install.py +++ b/env/Lib/site-packages/pip/_internal/req/req_install.py @@ -1,55 +1,59 @@ # The following comment should be removed at some point in the future. # mypy: strict-optional=False +import functools import logging import os import shutil import sys import uuid import zipfile -from typing import Any, Dict, Iterable, List, Optional, Sequence, Union +from optparse import Values +from typing import Any, Collection, Dict, Iterable, List, Optional, Sequence, Union -from pip._vendor import pkg_resources, six from pip._vendor.packaging.markers import Marker from pip._vendor.packaging.requirements import Requirement from pip._vendor.packaging.specifiers import SpecifierSet from pip._vendor.packaging.utils import canonicalize_name from pip._vendor.packaging.version import Version from pip._vendor.packaging.version import parse as parse_version -from pip._vendor.pep517.wrappers import Pep517HookCaller -from pip._vendor.pkg_resources import Distribution +from pip._vendor.pyproject_hooks import BuildBackendHookCaller from pip._internal.build_env import BuildEnvironment, NoOpBuildEnvironment from pip._internal.exceptions import InstallationError from pip._internal.locations import get_scheme +from pip._internal.metadata import ( + BaseDistribution, + get_default_environment, + get_directory_distribution, + get_wheel_distribution, +) +from pip._internal.metadata.base import FilesystemWheel +from pip._internal.models.direct_url import DirectUrl from pip._internal.models.link import Link from pip._internal.operations.build.metadata import generate_metadata +from pip._internal.operations.build.metadata_editable import generate_editable_metadata from pip._internal.operations.build.metadata_legacy import ( generate_metadata as generate_metadata_legacy, ) from pip._internal.operations.install.editable_legacy import ( install_editable as install_editable_legacy, ) -from pip._internal.operations.install.legacy import LegacyInstallFailure -from pip._internal.operations.install.legacy import install as install_legacy from pip._internal.operations.install.wheel import install_wheel from pip._internal.pyproject import load_pyproject_toml, make_pyproject_path from pip._internal.req.req_uninstall import UninstallPathSet from pip._internal.utils.deprecation import deprecated -from pip._internal.utils.direct_url_helpers import direct_url_from_link from pip._internal.utils.hashes import Hashes -from pip._internal.utils.logging import indent_log from pip._internal.utils.misc import ( + ConfiguredBuildBackendHookCaller, ask_path_exists, backup_dir, 
display_path, - dist_in_site_packages, - dist_in_usersite, - get_distribution, hide_url, redact_auth_from_url, ) -from pip._internal.utils.packaging import get_metadata +from pip._internal.utils.packaging import safe_extra +from pip._internal.utils.subprocess import runner_with_spinner_message from pip._internal.utils.temp_dir import TempDirectory, tempdir_kinds from pip._internal.utils.virtualenv import running_under_virtualenv from pip._internal.vcs import vcs @@ -57,33 +61,6 @@ from pip._internal.vcs import vcs logger = logging.getLogger(__name__) -def _get_dist(metadata_directory): - # type: (str) -> Distribution - """Return a pkg_resources.Distribution for the provided - metadata directory. - """ - dist_dir = metadata_directory.rstrip(os.sep) - - # Build a PathMetadata object, from path to metadata. :wink: - base_dir, dist_dir_name = os.path.split(dist_dir) - metadata = pkg_resources.PathMetadata(base_dir, dist_dir) - - # Determine the correct Distribution object type. - if dist_dir.endswith(".egg-info"): - dist_cls = pkg_resources.Distribution - dist_name = os.path.splitext(dist_dir_name)[0] - else: - assert dist_dir.endswith(".dist-info") - dist_cls = pkg_resources.DistInfoDistribution - dist_name = os.path.splitext(dist_dir_name)[0].split("-")[0] - - return dist_cls( - base_dir, - project_name=dist_name, - metadata=metadata, - ) - - class InstallRequirement: """ Represents something that may be installed later on, may have information @@ -93,78 +70,84 @@ class InstallRequirement: def __init__( self, - req, # type: Optional[Requirement] - comes_from, # type: Optional[Union[str, InstallRequirement]] - editable=False, # type: bool - link=None, # type: Optional[Link] - markers=None, # type: Optional[Marker] - use_pep517=None, # type: Optional[bool] - isolated=False, # type: bool - install_options=None, # type: Optional[List[str]] - global_options=None, # type: Optional[List[str]] - hash_options=None, # type: Optional[Dict[str, List[str]]] - constraint=False, # type: bool - extras=(), # type: Iterable[str] - user_supplied=False, # type: bool - ): - # type: (...) -> None + req: Optional[Requirement], + comes_from: Optional[Union[str, "InstallRequirement"]], + editable: bool = False, + link: Optional[Link] = None, + markers: Optional[Marker] = None, + use_pep517: Optional[bool] = None, + isolated: bool = False, + *, + global_options: Optional[List[str]] = None, + hash_options: Optional[Dict[str, List[str]]] = None, + config_settings: Optional[Dict[str, Union[str, List[str]]]] = None, + constraint: bool = False, + extras: Collection[str] = (), + user_supplied: bool = False, + permit_editable_wheels: bool = False, + ) -> None: assert req is None or isinstance(req, Requirement), req self.req = req self.comes_from = comes_from self.constraint = constraint self.editable = editable - self.legacy_install_reason = None # type: Optional[int] + self.permit_editable_wheels = permit_editable_wheels # source_dir is the local directory where the linked requirement is # located, or unpacked. In case unpacking is needed, creating and # populating source_dir is done by the RequirementPreparer. Note this # is not necessarily the directory where pyproject.toml or setup.py is # located - that one is obtained via unpacked_source_directory. 
- self.source_dir = None # type: Optional[str] + self.source_dir: Optional[str] = None if self.editable: assert link if link.is_file: - self.source_dir = os.path.normpath( - os.path.abspath(link.file_path) - ) + self.source_dir = os.path.normpath(os.path.abspath(link.file_path)) + # original_link is the direct URL that was provided by the user for the + # requirement, either directly or via a constraints file. if link is None and req and req.url: # PEP 508 URL requirement link = Link(req.url) self.link = self.original_link = link - self.original_link_is_in_wheel_cache = False + + # When this InstallRequirement is a wheel obtained from the cache of locally + # built wheels, this is the source link corresponding to the cache entry, which + # was used to download and build the cached wheel. + self.cached_wheel_source_link: Optional[Link] = None + + # Information about the location of the artifact that was downloaded. This + # property is guaranteed to be set in resolver results. + self.download_info: Optional[DirectUrl] = None # Path to any downloaded or already-existing package. - self.local_file_path = None # type: Optional[str] + self.local_file_path: Optional[str] = None if self.link and self.link.is_file: self.local_file_path = self.link.file_path if extras: self.extras = extras elif req: - self.extras = { - pkg_resources.safe_extra(extra) for extra in req.extras - } + self.extras = {safe_extra(extra) for extra in req.extras} else: self.extras = set() if markers is None and req: markers = req.marker self.markers = markers - # This holds the pkg_resources.Distribution object if this requirement - # is already available: - self.satisfied_by = None # type: Optional[Distribution] + # This holds the Distribution object if this requirement is already installed. + self.satisfied_by: Optional[BaseDistribution] = None # Whether the installation process should try to uninstall an existing # distribution before installing this requirement. self.should_reinstall = False # Temporary build location - self._temp_build_dir = None # type: Optional[TempDirectory] + self._temp_build_dir: Optional[TempDirectory] = None # Set to True after successful installation - self.install_succeeded = None # type: Optional[bool] + self.install_succeeded: Optional[bool] = None # Supplied options - self.install_options = install_options if install_options else [] self.global_options = global_options if global_options else [] self.hash_options = hash_options if hash_options else {} + self.config_settings = config_settings # Set to True after successful preparation of this requirement self.prepared = False # User supplied requirements are explicitly requested for installation @@ -173,22 +156,22 @@ class InstallRequirement: self.user_supplied = user_supplied self.isolated = isolated - self.build_env = NoOpBuildEnvironment() # type: BuildEnvironment + self.build_env: BuildEnvironment = NoOpBuildEnvironment() # For PEP 517, the directory where we request the project metadata # gets stored. We need this to pass to build_wheel, so the backend # can ensure that the wheel matches the metadata (see the PEP for # details). 
- self.metadata_directory = None # type: Optional[str] + self.metadata_directory: Optional[str] = None # The static build requirements (from pyproject.toml) - self.pyproject_requires = None # type: Optional[List[str]] + self.pyproject_requires: Optional[List[str]] = None # Build requirements that we will check are available - self.requirements_to_check = [] # type: List[str] + self.requirements_to_check: List[str] = [] # The PEP 517 backend we should use to build the project - self.pep517_backend = None # type: Optional[Pep517HookCaller] + self.pep517_backend: Optional[BuildBackendHookCaller] = None # Are we using PEP 517 for this requirement? # After pyproject.toml has been loaded, the only valid values are True @@ -200,87 +183,97 @@ class InstallRequirement: # This requirement needs more preparation before it can be built self.needs_more_preparation = False - def __str__(self): - # type: () -> str + def __str__(self) -> str: if self.req: s = str(self.req) if self.link: - s += ' from {}'.format(redact_auth_from_url(self.link.url)) + s += " from {}".format(redact_auth_from_url(self.link.url)) elif self.link: s = redact_auth_from_url(self.link.url) else: - s = '<InstallRequirement>' + s = "<InstallRequirement>" if self.satisfied_by is not None: - s += ' in {}'.format(display_path(self.satisfied_by.location)) + if self.satisfied_by.location is not None: + location = display_path(self.satisfied_by.location) + else: + location = "<memory>" + s += f" in {location}" if self.comes_from: if isinstance(self.comes_from, str): - comes_from = self.comes_from # type: Optional[str] + comes_from: Optional[str] = self.comes_from else: comes_from = self.comes_from.from_path() if comes_from: - s += f' (from {comes_from})' + s += f" (from {comes_from})" return s - def __repr__(self): - # type: () -> str - return '<{} object: {} editable={!r}>'.format( - self.__class__.__name__, str(self), self.editable) + def __repr__(self) -> str: + return "<{} object: {} editable={!r}>".format( + self.__class__.__name__, str(self), self.editable + ) - def format_debug(self): - # type: () -> str - """An un-tested helper for getting state, for debugging. - """ + def format_debug(self) -> str: + """An un-tested helper for getting state, for debugging.""" attributes = vars(self) names = sorted(attributes) - state = ( - "{}={!r}".format(attr, attributes[attr]) for attr in sorted(names) - ) - return '<{name} object: {{{state}}}>'.format( + state = ("{}={!r}".format(attr, attributes[attr]) for attr in sorted(names)) + return "<{name} object: {{{state}}}>".format( name=self.__class__.__name__, state=", ".join(state), ) # Things that are valid for all kinds of requirements? 
@property - def name(self): - # type: () -> Optional[str] + def name(self) -> Optional[str]: if self.req is None: return None - return pkg_resources.safe_name(self.req.name) + return self.req.name + + @functools.lru_cache() # use cached_property in python 3.8+ + def supports_pyproject_editable(self) -> bool: + if not self.use_pep517: + return False + assert self.pep517_backend + with self.build_env: + runner = runner_with_spinner_message( + "Checking if build backend supports build_editable" + ) + with self.pep517_backend.subprocess_runner(runner): + return "build_editable" in self.pep517_backend._supported_features() @property - def specifier(self): - # type: () -> SpecifierSet + def specifier(self) -> SpecifierSet: return self.req.specifier @property - def is_pinned(self): - # type: () -> bool + def is_direct(self) -> bool: + """Whether this requirement was specified as a direct URL.""" + return self.original_link is not None + + @property + def is_pinned(self) -> bool: """Return whether I am pinned to an exact version. For example, some-package==1.2 is pinned; some-package>1.2 is not. """ specifiers = self.specifier - return (len(specifiers) == 1 and - next(iter(specifiers)).operator in {'==', '==='}) + return len(specifiers) == 1 and next(iter(specifiers)).operator in {"==", "==="} - def match_markers(self, extras_requested=None): - # type: (Optional[Iterable[str]]) -> bool + def match_markers(self, extras_requested: Optional[Iterable[str]] = None) -> bool: if not extras_requested: # Provide an extra to safely evaluate the markers # without matching any extra - extras_requested = ('',) + extras_requested = ("",) if self.markers is not None: return any( - self.markers.evaluate({'extra': extra}) - for extra in extras_requested) + self.markers.evaluate({"extra": extra}) for extra in extras_requested + ) else: return True @property - def has_hash_options(self): - # type: () -> bool + def has_hash_options(self) -> bool: """Return whether any known-good hashes are specified as options. These activate --require-hashes mode; hashes specified as part of a @@ -289,8 +282,7 @@ class InstallRequirement: """ return bool(self.hash_options) - def hashes(self, trust_internet=True): - # type: (bool) -> Hashes + def hashes(self, trust_internet: bool = True) -> Hashes: """Return a hash-comparer that considers my option- and URL-based hashes to be known-good. 
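``is_pinned`` and ``match_markers`` above delegate the real work to ``packaging`` primitives (pip uses its vendored copy; the standalone ``packaging`` distribution, assumed installed here, behaves the same). For illustration:

from packaging.markers import Marker
from packaging.requirements import Requirement

def is_pinned(req: Requirement) -> bool:
    # Pinned means exactly one specifier clause using == or ===.
    specifiers = req.specifier
    return len(specifiers) == 1 and next(iter(specifiers)).operator in {"==", "==="}

assert is_pinned(Requirement("some-package==1.2"))
assert not is_pinned(Requirement("some-package>1.2"))

# With no extras requested, evaluating against a single empty-string extra
# (as match_markers does) keeps 'extra == "..."' markers well-defined.
marker = Marker('python_version >= "3" and extra == "test"')
print(any(marker.evaluate({"extra": extra}) for extra in ("",)))  # False
print(marker.evaluate({"extra": "test"}))  # True on Python 3
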
@@ -306,15 +298,18 @@ class InstallRequirement: """ good_hashes = self.hash_options.copy() - link = self.link if trust_internet else self.original_link + if trust_internet: + link = self.link + elif self.is_direct and self.user_supplied: + link = self.original_link + else: + link = None if link and link.hash: good_hashes.setdefault(link.hash_name, []).append(link.hash) return Hashes(good_hashes) - def from_path(self): - # type: () -> Optional[str] - """Format a nice indicator to show where this "comes from" - """ + def from_path(self) -> Optional[str]: + """Format a nice indicator to show where this "comes from" """ if self.req is None: return None s = str(self.req) @@ -324,11 +319,12 @@ class InstallRequirement: else: comes_from = self.comes_from.from_path() if comes_from: - s += '->' + comes_from + s += "->" + comes_from return s - def ensure_build_location(self, build_dir, autodelete, parallel_builds): - # type: (str, bool, bool) -> str + def ensure_build_location( + self, build_dir: str, autodelete: bool, parallel_builds: bool + ) -> str: assert build_dir is not None if self._temp_build_dir is not None: assert self._temp_build_dir.path @@ -349,14 +345,14 @@ class InstallRequirement: # When parallel builds are enabled, add a UUID to the build directory # name so multiple builds do not interfere with each other. - dir_name = canonicalize_name(self.name) # type: str + dir_name: str = canonicalize_name(self.name) if parallel_builds: dir_name = f"{dir_name}_{uuid.uuid4().hex}" # FIXME: Is there a better place to create the build_dir? (hg and bzr # need this) if not os.path.exists(build_dir): - logger.debug('Creating directory %s', build_dir) + logger.debug("Creating directory %s", build_dir) os.makedirs(build_dir) actual_build_dir = os.path.join(build_dir, dir_name) # `None` indicates that we respect the globally-configured deletion @@ -369,10 +365,8 @@ class InstallRequirement: globally_managed=True, ).path - def _set_requirement(self): - # type: () -> None - """Set requirement after generating metadata. - """ + def _set_requirement(self) -> None: + """Set requirement after generating metadata.""" assert self.req is None assert self.metadata is not None assert self.source_dir is not None @@ -384,15 +378,16 @@ class InstallRequirement: op = "===" self.req = Requirement( - "".join([ - self.metadata["Name"], - op, - self.metadata["Version"], - ]) + "".join( + [ + self.metadata["Name"], + op, + self.metadata["Version"], + ] + ) ) - def warn_on_mismatching_name(self): - # type: () -> None + def warn_on_mismatching_name(self) -> None: metadata_name = canonicalize_name(self.metadata["Name"]) if canonicalize_name(self.req.name) == metadata_name: # Everything is fine. @@ -400,45 +395,40 @@ class InstallRequirement: # If we're here, there's a mismatch. Log a warning about it. logger.warning( - 'Generating metadata for package %s ' - 'produced metadata for project name %s. Fix your ' - '#egg=%s fragments.', - self.name, metadata_name, self.name + "Generating metadata for package %s " + "produced metadata for project name %s. Fix your " + "#egg=%s fragments.", + self.name, + metadata_name, + self.name, ) self.req = Requirement(metadata_name) - def check_if_exists(self, use_user_site): - # type: (bool) -> None + def check_if_exists(self, use_user_site: bool) -> None: """Find an installed distribution that satisfies or conflicts with this requirement, and set self.satisfied_by or self.should_reinstall appropriately. 
""" if self.req is None: return - existing_dist = get_distribution(self.req.name) + existing_dist = get_default_environment().get_distribution(self.req.name) if not existing_dist: return - # pkg_resouces may contain a different copy of packaging.version from - # pip in if the downstream distributor does a poor job debundling pip. - # We avoid existing_dist.parsed_version and let SpecifierSet.contains - # parses the version instead. - existing_version = existing_dist.version - version_compatible = ( - existing_version is not None and - self.req.specifier.contains(existing_version, prereleases=True) + version_compatible = self.req.specifier.contains( + existing_dist.version, + prereleases=True, ) if not version_compatible: self.satisfied_by = None if use_user_site: - if dist_in_usersite(existing_dist): + if existing_dist.in_usersite: self.should_reinstall = True - elif (running_under_virtualenv() and - dist_in_site_packages(existing_dist)): + elif running_under_virtualenv() and existing_dist.in_site_packages: raise InstallationError( - "Will not install to the user site because it will " - "lack sys.path precedence to {} in {}".format( - existing_dist.project_name, existing_dist.location) + f"Will not install to the user site because it will " + f"lack sys.path precedence to {existing_dist.raw_name} " + f"in {existing_dist.location}" ) else: self.should_reinstall = True @@ -453,36 +443,44 @@ class InstallRequirement: # Things valid for wheels @property - def is_wheel(self): - # type: () -> bool + def is_wheel(self) -> bool: if not self.link: return False return self.link.is_wheel + @property + def is_wheel_from_cache(self) -> bool: + # When True, it means that this InstallRequirement is a local wheel file in the + # cache of locally built wheels. + return self.cached_wheel_source_link is not None + # Things valid for sdists @property - def unpacked_source_directory(self): - # type: () -> str + def unpacked_source_directory(self) -> str: return os.path.join( - self.source_dir, - self.link and self.link.subdirectory_fragment or '') + self.source_dir, self.link and self.link.subdirectory_fragment or "" + ) @property - def setup_py_path(self): - # type: () -> str + def setup_py_path(self) -> str: assert self.source_dir, f"No source dir for {self}" - setup_py = os.path.join(self.unpacked_source_directory, 'setup.py') + setup_py = os.path.join(self.unpacked_source_directory, "setup.py") return setup_py @property - def pyproject_toml_path(self): - # type: () -> str + def setup_cfg_path(self) -> str: + assert self.source_dir, f"No source dir for {self}" + setup_cfg = os.path.join(self.unpacked_source_directory, "setup.cfg") + + return setup_cfg + + @property + def pyproject_toml_path(self) -> str: assert self.source_dir, f"No source dir for {self}" return make_pyproject_path(self.unpacked_source_directory) - def load_pyproject_toml(self): - # type: () -> None + def load_pyproject_toml(self) -> None: """Load the pyproject.toml file. After calling this routine, all of the attributes related to PEP 517 @@ -491,13 +489,19 @@ class InstallRequirement: follow the PEP 517 or legacy (setup.py) code path. 
""" pyproject_toml_data = load_pyproject_toml( - self.use_pep517, - self.pyproject_toml_path, - self.setup_py_path, - str(self) + self.use_pep517, self.pyproject_toml_path, self.setup_py_path, str(self) ) if pyproject_toml_data is None: + if self.config_settings: + deprecated( + reason=f"Config settings are ignored for project {self}.", + replacement=( + "to use --use-pep517 or add a " + "pyproject.toml file to the project" + ), + gone_in="23.3", + ) self.use_pep517 = False return @@ -505,48 +509,69 @@ class InstallRequirement: requires, backend, check, backend_path = pyproject_toml_data self.requirements_to_check = check self.pyproject_requires = requires - self.pep517_backend = Pep517HookCaller( - self.unpacked_source_directory, backend, backend_path=backend_path, + self.pep517_backend = ConfiguredBuildBackendHookCaller( + self, + self.unpacked_source_directory, + backend, + backend_path=backend_path, ) - def _generate_metadata(self): - # type: () -> str - """Invokes metadata generator functions, with the required arguments. - """ - if not self.use_pep517: - assert self.unpacked_source_directory - - if not os.path.exists(self.setup_py_path): - raise InstallationError( - f'File "setup.py" not found for legacy project {self}.' - ) + def isolated_editable_sanity_check(self) -> None: + """Check that an editable requirement if valid for use with PEP 517/518. - return generate_metadata_legacy( - build_env=self.build_env, - setup_py_path=self.setup_py_path, - source_dir=self.unpacked_source_directory, - isolated=self.isolated, - details=self.name or f"from {self.link}" + This verifies that an editable that has a pyproject.toml either supports PEP 660 + or as a setup.py or a setup.cfg + """ + if ( + self.editable + and self.use_pep517 + and not self.supports_pyproject_editable() + and not os.path.isfile(self.setup_py_path) + and not os.path.isfile(self.setup_cfg_path) + ): + raise InstallationError( + f"Project {self} has a 'pyproject.toml' and its build " + f"backend is missing the 'build_editable' hook. Since it does not " + f"have a 'setup.py' nor a 'setup.cfg', " + f"it cannot be installed in editable mode. " + f"Consider using a build backend that supports PEP 660." ) - assert self.pep517_backend is not None - - return generate_metadata( - build_env=self.build_env, - backend=self.pep517_backend, - ) - - def prepare_metadata(self): - # type: () -> None + def prepare_metadata(self) -> None: """Ensure that project metadata is available. - Under PEP 517, call the backend hook to prepare the metadata. + Under PEP 517 and PEP 660, call the backend hook to prepare the metadata. Under legacy processing, call setup.py egg-info. """ assert self.source_dir - - with indent_log(): - self.metadata_directory = self._generate_metadata() + details = self.name or f"from {self.link}" + + if self.use_pep517: + assert self.pep517_backend is not None + if ( + self.editable + and self.permit_editable_wheels + and self.supports_pyproject_editable() + ): + self.metadata_directory = generate_editable_metadata( + build_env=self.build_env, + backend=self.pep517_backend, + details=details, + ) + else: + self.metadata_directory = generate_metadata( + build_env=self.build_env, + backend=self.pep517_backend, + details=details, + ) + else: + self.metadata_directory = generate_metadata_legacy( + build_env=self.build_env, + setup_py_path=self.setup_py_path, + source_dir=self.unpacked_source_directory, + isolated=self.isolated, + details=details, + ) # Act on the newly generated metadata, based on the name and version. 
if not self.name: @@ -557,30 +582,36 @@ class InstallRequirement: self.assert_source_matches_version() @property - def metadata(self): - # type: () -> Any - if not hasattr(self, '_metadata'): - self._metadata = get_metadata(self.get_dist()) + def metadata(self) -> Any: + if not hasattr(self, "_metadata"): + self._metadata = self.get_dist().metadata return self._metadata - def get_dist(self): - # type: () -> Distribution - return _get_dist(self.metadata_directory) + def get_dist(self) -> BaseDistribution: + if self.metadata_directory: + return get_directory_distribution(self.metadata_directory) + elif self.local_file_path and self.is_wheel: + return get_wheel_distribution( + FilesystemWheel(self.local_file_path), canonicalize_name(self.name) + ) + raise AssertionError( + f"InstallRequirement {self} has no metadata directory and no wheel: " + f"can't make a distribution." + ) - def assert_source_matches_version(self): - # type: () -> None + def assert_source_matches_version(self) -> None: assert self.source_dir - version = self.metadata['version'] + version = self.metadata["version"] if self.req.specifier and version not in self.req.specifier: logger.warning( - 'Requested %s, but installing version %s', + "Requested %s, but installing version %s", self, version, ) else: logger.debug( - 'Source in %s has version %s, which satisfies requirement %s', + "Source in %s has version %s, which satisfies requirement %s", display_path(self.source_dir), version, self, @@ -589,11 +620,10 @@ class InstallRequirement: # For both source distributions and editables def ensure_has_source_dir( self, - parent_dir, - autodelete=False, - parallel_builds=False, - ): - # type: (str, bool, bool) -> None + parent_dir: str, + autodelete: bool = False, + parallel_builds: bool = False, + ) -> None: """Ensure that a source_dir is set. This will create a temporary build dir if the name of the requirement @@ -611,18 +641,16 @@ class InstallRequirement: ) # For editable installations - def update_editable(self): - # type: () -> None + def update_editable(self) -> None: if not self.link: logger.debug( - "Cannot update repository at %s; repository location is " - "unknown", + "Cannot update repository at %s; repository location is unknown", self.source_dir, ) return assert self.editable assert self.source_dir - if self.link.scheme == 'file': + if self.link.scheme == "file": # Static paths don't get updated return vcs_backend = vcs.get_backend_for_scheme(self.link.scheme) @@ -630,11 +658,12 @@ class InstallRequirement: # So here, if it's neither a path nor a valid VCS URL, it's a bug. assert vcs_backend, f"Unsupported VCS URL {self.link.url}" hidden_url = hide_url(self.link.url) - vcs_backend.obtain(self.source_dir, url=hidden_url) + vcs_backend.obtain(self.source_dir, url=hidden_url, verbosity=0) # Top-level Actions - def uninstall(self, auto_confirm=False, verbose=False): - # type: (bool, bool) -> Optional[UninstallPathSet] + def uninstall( + self, auto_confirm: bool = False, verbose: bool = False + ) -> Optional[UninstallPathSet]: """ Uninstall the distribution currently satisfying this requirement. 
@@ -648,34 +677,30 @@ class InstallRequirement: """ assert self.req - dist = get_distribution(self.req.name) + dist = get_default_environment().get_distribution(self.req.name) if not dist: logger.warning("Skipping %s as it is not installed.", self.name) return None - logger.info('Found existing installation: %s', dist) + logger.info("Found existing installation: %s", dist) uninstalled_pathset = UninstallPathSet.from_dist(dist) uninstalled_pathset.remove(auto_confirm, verbose) return uninstalled_pathset - def _get_archive_name(self, path, parentdir, rootdir): - # type: (str, str, str) -> str - - def _clean_zip_name(name, prefix): - # type: (str, str) -> str - assert name.startswith(prefix + os.path.sep), ( - f"name {name!r} doesn't start with prefix {prefix!r}" - ) - name = name[len(prefix) + 1:] - name = name.replace(os.path.sep, '/') + def _get_archive_name(self, path: str, parentdir: str, rootdir: str) -> str: + def _clean_zip_name(name: str, prefix: str) -> str: + assert name.startswith( + prefix + os.path.sep + ), f"name {name!r} doesn't start with prefix {prefix!r}" + name = name[len(prefix) + 1 :] + name = name.replace(os.path.sep, "/") return name path = os.path.join(parentdir, path) name = _clean_zip_name(path, rootdir) - return self.name + '/' + name + return self.name + "/" + name - def archive(self, build_dir): - # type: (Optional[str]) -> None + def archive(self, build_dir: Optional[str]) -> None: """Saves archive to provided build_dir. Used for saving downloaded VCS requirements as part of `pip download`. @@ -685,70 +710,73 @@ class InstallRequirement: return create_archive = True - archive_name = '{}-{}.zip'.format(self.name, self.metadata["version"]) + archive_name = "{}-{}.zip".format(self.name, self.metadata["version"]) archive_path = os.path.join(build_dir, archive_name) if os.path.exists(archive_path): response = ask_path_exists( - 'The file {} exists. (i)gnore, (w)ipe, ' - '(b)ackup, (a)bort '.format( - display_path(archive_path)), - ('i', 'w', 'b', 'a')) - if response == 'i': + "The file {} exists. 
(i)gnore, (w)ipe, " + "(b)ackup, (a)bort ".format(display_path(archive_path)), + ("i", "w", "b", "a"), + ) + if response == "i": create_archive = False - elif response == 'w': - logger.warning('Deleting %s', display_path(archive_path)) + elif response == "w": + logger.warning("Deleting %s", display_path(archive_path)) os.remove(archive_path) - elif response == 'b': + elif response == "b": dest_file = backup_dir(archive_path) logger.warning( - 'Backing up %s to %s', + "Backing up %s to %s", display_path(archive_path), display_path(dest_file), ) shutil.move(archive_path, dest_file) - elif response == 'a': + elif response == "a": sys.exit(-1) if not create_archive: return zip_output = zipfile.ZipFile( - archive_path, 'w', zipfile.ZIP_DEFLATED, allowZip64=True, + archive_path, + "w", + zipfile.ZIP_DEFLATED, + allowZip64=True, ) with zip_output: - dir = os.path.normcase( - os.path.abspath(self.unpacked_source_directory) - ) + dir = os.path.normcase(os.path.abspath(self.unpacked_source_directory)) for dirpath, dirnames, filenames in os.walk(dir): for dirname in dirnames: dir_arcname = self._get_archive_name( - dirname, parentdir=dirpath, rootdir=dir, + dirname, + parentdir=dirpath, + rootdir=dir, ) - zipdir = zipfile.ZipInfo(dir_arcname + '/') + zipdir = zipfile.ZipInfo(dir_arcname + "/") zipdir.external_attr = 0x1ED << 16 # 0o755 - zip_output.writestr(zipdir, '') + zip_output.writestr(zipdir, "") for filename in filenames: file_arcname = self._get_archive_name( - filename, parentdir=dirpath, rootdir=dir, + filename, + parentdir=dirpath, + rootdir=dir, ) filename = os.path.join(dirpath, filename) zip_output.write(filename, file_arcname) - logger.info('Saved %s', display_path(archive_path)) + logger.info("Saved %s", display_path(archive_path)) def install( self, - install_options, # type: List[str] - global_options=None, # type: Optional[Sequence[str]] - root=None, # type: Optional[str] - home=None, # type: Optional[str] - prefix=None, # type: Optional[str] - warn_script_location=True, # type: bool - use_user_site=False, # type: bool - pycompile=True # type: bool - ): - # type: (...) -> None + global_options: Optional[Sequence[str]] = None, + root: Optional[str] = None, + home: Optional[str] = None, + prefix: Optional[str] = None, + warn_script_location: bool = True, + use_user_site: bool = False, + pycompile: bool = True, + ) -> None: scheme = get_scheme( self.name, user=use_user_site, @@ -758,11 +786,9 @@ class InstallRequirement: prefix=prefix, ) - global_options = global_options if global_options is not None else [] - if self.editable: + if self.editable and not self.is_wheel: install_editable_legacy( - install_options, - global_options, + global_options=global_options if global_options is not None else [], prefix=prefix, home=home, use_user_site=use_user_site, @@ -775,80 +801,23 @@ class InstallRequirement: self.install_succeeded = True return - if self.is_wheel: - assert self.local_file_path - direct_url = None - if self.original_link: - direct_url = direct_url_from_link( - self.original_link, - self.source_dir, - self.original_link_is_in_wheel_cache, - ) - install_wheel( - self.name, - self.local_file_path, - scheme=scheme, - req_description=str(self.req), - pycompile=pycompile, - warn_script_location=warn_script_location, - direct_url=direct_url, - requested=self.user_supplied, - ) - self.install_succeeded = True - return - - # TODO: Why don't we do this for editable installs? 
- - # Extend the list of global and install options passed on to - # the setup.py call with the ones from the requirements file. - # Options specified in requirements file override those - # specified on the command line, since the last option given - # to setup.py is the one that is used. - global_options = list(global_options) + self.global_options - install_options = list(install_options) + self.install_options - - try: - success = install_legacy( - install_options=install_options, - global_options=global_options, - root=root, - home=home, - prefix=prefix, - use_user_site=use_user_site, - pycompile=pycompile, - scheme=scheme, - setup_py_path=self.setup_py_path, - isolated=self.isolated, - req_name=self.name, - build_env=self.build_env, - unpacked_source_directory=self.unpacked_source_directory, - req_description=str(self.req), - ) - except LegacyInstallFailure as exc: - self.install_succeeded = False - six.reraise(*exc.parent) - except Exception: - self.install_succeeded = True - raise - - self.install_succeeded = success - - if success and self.legacy_install_reason == 8368: - deprecated( - reason=( - "{} was installed using the legacy 'setup.py install' " - "method, because a wheel could not be built for it.". - format(self.name) - ), - replacement="to fix the wheel build issue reported above", - gone_in=None, - issue=8368, - ) + assert self.is_wheel + assert self.local_file_path + install_wheel( + self.name, + self.local_file_path, + scheme=scheme, + req_description=str(self.req), + pycompile=pycompile, + warn_script_location=warn_script_location, + direct_url=self.download_info if self.is_direct else None, + requested=self.user_supplied, + ) + self.install_succeeded = True -def check_invalid_constraint_type(req): - # type: (InstallRequirement) -> str +def check_invalid_constraint_type(req: InstallRequirement) -> str: # Check for unsupported forms problem = "" if not req.name: @@ -867,12 +836,39 @@ def check_invalid_constraint_type(req): "undocumented. The new implementation of the resolver no " "longer supports these forms." ), - replacement=( - "replacing the constraint with a requirement." - ), + replacement="replacing the constraint with a requirement", # No plan yet for when the new resolver becomes default gone_in=None, - issue=8210 + issue=8210, ) return problem + + +def _has_option(options: Values, reqs: List[InstallRequirement], option: str) -> bool: + if getattr(options, option, None): + return True + for req in reqs: + if getattr(req, option, None): + return True + return False + + +def check_legacy_setup_py_options( + options: Values, + reqs: List[InstallRequirement], +) -> None: + has_build_options = _has_option(options, reqs, "build_options") + has_global_options = _has_option(options, reqs, "global_options") + if has_build_options or has_global_options: + deprecated( + reason="--build-option and --global-option are deprecated.", + issue=11859, + replacement="to use --config-settings", + gone_in="23.3", + ) + logger.warning( + "Implying --no-binary=:all: due to the presence of " + "--build-option / --global-option. 
" + ) + options.format_control.disallow_binaries() diff --git a/env/Lib/site-packages/pip/_internal/req/req_set.py b/env/Lib/site-packages/pip/_internal/req/req_set.py index 59c58435574aabe93b84689a85e40d17089ced55..cff676017373bfacb12b937e6bea7266965fc040 100644 --- a/env/Lib/site-packages/pip/_internal/req/req_set.py +++ b/env/Lib/site-packages/pip/_internal/req/req_set.py @@ -1,191 +1,65 @@ import logging from collections import OrderedDict -from typing import Dict, Iterable, List, Optional, Tuple +from typing import Dict, List +from pip._vendor.packaging.specifiers import LegacySpecifier from pip._vendor.packaging.utils import canonicalize_name +from pip._vendor.packaging.version import LegacyVersion -from pip._internal.exceptions import InstallationError -from pip._internal.models.wheel import Wheel from pip._internal.req.req_install import InstallRequirement -from pip._internal.utils import compatibility_tags +from pip._internal.utils.deprecation import deprecated logger = logging.getLogger(__name__) class RequirementSet: + def __init__(self, check_supported_wheels: bool = True) -> None: + """Create a RequirementSet.""" - def __init__(self, check_supported_wheels=True): - # type: (bool) -> None - """Create a RequirementSet. - """ - - self.requirements = OrderedDict() # type: Dict[str, InstallRequirement] + self.requirements: Dict[str, InstallRequirement] = OrderedDict() self.check_supported_wheels = check_supported_wheels - self.unnamed_requirements = [] # type: List[InstallRequirement] + self.unnamed_requirements: List[InstallRequirement] = [] - def __str__(self): - # type: () -> str + def __str__(self) -> str: requirements = sorted( (req for req in self.requirements.values() if not req.comes_from), key=lambda req: canonicalize_name(req.name or ""), ) - return ' '.join(str(req.req) for req in requirements) + return " ".join(str(req.req) for req in requirements) - def __repr__(self): - # type: () -> str + def __repr__(self) -> str: requirements = sorted( self.requirements.values(), key=lambda req: canonicalize_name(req.name or ""), ) - format_string = '<{classname} object; {count} requirement(s): {reqs}>' + format_string = "<{classname} object; {count} requirement(s): {reqs}>" return format_string.format( classname=self.__class__.__name__, count=len(requirements), - reqs=', '.join(str(req.req) for req in requirements), + reqs=", ".join(str(req.req) for req in requirements), ) - def add_unnamed_requirement(self, install_req): - # type: (InstallRequirement) -> None + def add_unnamed_requirement(self, install_req: InstallRequirement) -> None: assert not install_req.name self.unnamed_requirements.append(install_req) - def add_named_requirement(self, install_req): - # type: (InstallRequirement) -> None + def add_named_requirement(self, install_req: InstallRequirement) -> None: assert install_req.name project_name = canonicalize_name(install_req.name) self.requirements[project_name] = install_req - def add_requirement( - self, - install_req, # type: InstallRequirement - parent_req_name=None, # type: Optional[str] - extras_requested=None # type: Optional[Iterable[str]] - ): - # type: (...) -> Tuple[List[InstallRequirement], Optional[InstallRequirement]] - """Add install_req as a requirement to install. - - :param parent_req_name: The name of the requirement that needed this - added. The name is used because when multiple unnamed requirements - resolve to the same name, we could otherwise end up with dependency - links that point outside the Requirements set. 
parent_req must - already be added. Note that None implies that this is a user - supplied requirement, vs an inferred one. - :param extras_requested: an iterable of extras used to evaluate the - environment markers. - :return: Additional requirements to scan. That is either [] if - the requirement is not applicable, or [install_req] if the - requirement is applicable and has just been added. - """ - # If the markers do not match, ignore this requirement. - if not install_req.match_markers(extras_requested): - logger.info( - "Ignoring %s: markers '%s' don't match your environment", - install_req.name, install_req.markers, - ) - return [], None - - # If the wheel is not supported, raise an error. - # Should check this after filtering out based on environment markers to - # allow specifying different wheels based on the environment/OS, in a - # single requirements file. - if install_req.link and install_req.link.is_wheel: - wheel = Wheel(install_req.link.filename) - tags = compatibility_tags.get_supported() - if (self.check_supported_wheels and not wheel.supported(tags)): - raise InstallationError( - "{} is not a supported wheel on this platform.".format( - wheel.filename) - ) - - # This next bit is really a sanity check. - assert not install_req.user_supplied or parent_req_name is None, ( - "a user supplied req shouldn't have a parent" - ) - - # Unnamed requirements are scanned again and the requirement won't be - # added as a dependency until after scanning. - if not install_req.name: - self.add_unnamed_requirement(install_req) - return [install_req], None - - try: - existing_req = self.get_requirement( - install_req.name) # type: Optional[InstallRequirement] - except KeyError: - existing_req = None - - has_conflicting_requirement = ( - parent_req_name is None and - existing_req and - not existing_req.constraint and - existing_req.extras == install_req.extras and - existing_req.req and - install_req.req and - existing_req.req.specifier != install_req.req.specifier - ) - if has_conflicting_requirement: - raise InstallationError( - "Double requirement given: {} (already in {}, name={!r})" - .format(install_req, existing_req, install_req.name) - ) - - # When no existing requirement exists, add the requirement as a - # dependency and it will be scanned again after. - if not existing_req: - self.add_named_requirement(install_req) - # We'd want to rescan this requirement later - return [install_req], install_req - - # Assume there's no need to scan, and that we've already - # encountered this for scanning. - if install_req.constraint or not existing_req.constraint: - return [], existing_req - - does_not_satisfy_constraint = ( - install_req.link and - not ( - existing_req.link and - install_req.link.path == existing_req.link.path - ) - ) - if does_not_satisfy_constraint: - raise InstallationError( - "Could not satisfy constraints for '{}': " - "installation from path or url cannot be " - "constrained to a version".format(install_req.name) - ) - # If we're now installing a constraint, mark the existing - # object for real installation. - existing_req.constraint = False - # If we're now installing a user supplied requirement, - # mark the existing object as such. - if install_req.user_supplied: - existing_req.user_supplied = True - existing_req.extras = tuple(sorted( - set(existing_req.extras) | set(install_req.extras) - )) - logger.debug( - "Setting %s extras to: %s", - existing_req, existing_req.extras, - ) - # Return the existing requirement for addition to the parent and - # scanning again. 
- return [existing_req], existing_req - - def has_requirement(self, name): - # type: (str) -> bool + def has_requirement(self, name: str) -> bool: project_name = canonicalize_name(name) return ( - project_name in self.requirements and - not self.requirements[project_name].constraint + project_name in self.requirements + and not self.requirements[project_name].constraint ) - def get_requirement(self, name): - # type: (str) -> InstallRequirement + def get_requirement(self, name: str) -> InstallRequirement: project_name = canonicalize_name(name) if project_name in self.requirements: @@ -194,6 +68,52 @@ class RequirementSet: raise KeyError(f"No project with the name {name!r}") @property - def all_requirements(self): - # type: () -> List[InstallRequirement] + def all_requirements(self) -> List[InstallRequirement]: return self.unnamed_requirements + list(self.requirements.values()) + + @property + def requirements_to_install(self) -> List[InstallRequirement]: + """Return the list of requirements that need to be installed. + + TODO remove this property together with the legacy resolver, since the new + resolver only returns requirements that need to be installed. + """ + return [ + install_req + for install_req in self.all_requirements + if not install_req.constraint and not install_req.satisfied_by + ] + + def warn_legacy_versions_and_specifiers(self) -> None: + for req in self.requirements_to_install: + version = req.get_dist().version + if isinstance(version, LegacyVersion): + deprecated( + reason=( + f"pip has selected the non standard version {version} " + f"of {req}. In the future this version will be " + f"ignored as it isn't standard compliant." + ), + replacement=( + "set or update constraints to select another version " + "or contact the package author to fix the version number" + ), + issue=12063, + gone_in="23.3", + ) + for dep in req.get_dist().iter_dependencies(): + if any(isinstance(spec, LegacySpecifier) for spec in dep.specifier): + deprecated( + reason=( + f"pip has selected {req} {version} which has non " + f"standard dependency specifier {dep}. " + f"In the future this version of {req} will be " + f"ignored as it isn't standard compliant." + ), + replacement=( + "set or update constraints to select another version " + "or contact the package author to fix the version number" + ), + issue=12063, + gone_in="23.3", + ) diff --git a/env/Lib/site-packages/pip/_internal/req/req_tracker.py b/env/Lib/site-packages/pip/_internal/req/req_tracker.py deleted file mode 100644 index 542e0d94e37382c7f16591158199d6a0e115a167..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_internal/req/req_tracker.py +++ /dev/null @@ -1,140 +0,0 @@ -import contextlib -import hashlib -import logging -import os -from types import TracebackType -from typing import Dict, Iterator, Optional, Set, Type, Union - -from pip._internal.models.link import Link -from pip._internal.req.req_install import InstallRequirement -from pip._internal.utils.temp_dir import TempDirectory - -logger = logging.getLogger(__name__) - - -@contextlib.contextmanager -def update_env_context_manager(**changes): - # type: (str) -> Iterator[None] - target = os.environ - - # Save values from the target and change them. 
- non_existent_marker = object() - saved_values = {} # type: Dict[str, Union[object, str]] - for name, new_value in changes.items(): - try: - saved_values[name] = target[name] - except KeyError: - saved_values[name] = non_existent_marker - target[name] = new_value - - try: - yield - finally: - # Restore original values in the target. - for name, original_value in saved_values.items(): - if original_value is non_existent_marker: - del target[name] - else: - assert isinstance(original_value, str) # for mypy - target[name] = original_value - - -@contextlib.contextmanager -def get_requirement_tracker(): - # type: () -> Iterator[RequirementTracker] - root = os.environ.get('PIP_REQ_TRACKER') - with contextlib.ExitStack() as ctx: - if root is None: - root = ctx.enter_context( - TempDirectory(kind='req-tracker') - ).path - ctx.enter_context(update_env_context_manager(PIP_REQ_TRACKER=root)) - logger.debug("Initialized build tracking at %s", root) - - with RequirementTracker(root) as tracker: - yield tracker - - -class RequirementTracker: - - def __init__(self, root): - # type: (str) -> None - self._root = root - self._entries = set() # type: Set[InstallRequirement] - logger.debug("Created build tracker: %s", self._root) - - def __enter__(self): - # type: () -> RequirementTracker - logger.debug("Entered build tracker: %s", self._root) - return self - - def __exit__( - self, - exc_type, # type: Optional[Type[BaseException]] - exc_val, # type: Optional[BaseException] - exc_tb # type: Optional[TracebackType] - ): - # type: (...) -> None - self.cleanup() - - def _entry_path(self, link): - # type: (Link) -> str - hashed = hashlib.sha224(link.url_without_fragment.encode()).hexdigest() - return os.path.join(self._root, hashed) - - def add(self, req): - # type: (InstallRequirement) -> None - """Add an InstallRequirement to build tracking. - """ - - assert req.link - # Get the file to write information about this requirement. - entry_path = self._entry_path(req.link) - - # Try reading from the file. If it exists and can be read from, a build - # is already in progress, so a LookupError is raised. - try: - with open(entry_path) as fp: - contents = fp.read() - except FileNotFoundError: - pass - else: - message = '{} is already being built: {}'.format( - req.link, contents) - raise LookupError(message) - - # If we're here, req should really not be building already. - assert req not in self._entries - - # Start tracking this requirement. - with open(entry_path, 'w', encoding="utf-8") as fp: - fp.write(str(req)) - self._entries.add(req) - - logger.debug('Added %s to build tracker %r', req, self._root) - - def remove(self, req): - # type: (InstallRequirement) -> None - """Remove an InstallRequirement from build tracking. - """ - - assert req.link - # Delete the created file and the corresponding entries. 
- os.unlink(self._entry_path(req.link)) - self._entries.remove(req) - - logger.debug('Removed %s from build tracker %r', req, self._root) - - def cleanup(self): - # type: () -> None - for req in set(self._entries): - self.remove(req) - - logger.debug("Removed build tracker: %r", self._root) - - @contextlib.contextmanager - def track(self, req): - # type: (InstallRequirement) -> Iterator[None] - self.add(req) - yield - self.remove(req) diff --git a/env/Lib/site-packages/pip/_internal/req/req_uninstall.py b/env/Lib/site-packages/pip/_internal/req/req_uninstall.py index b72234175b28323836cd211bb4966dafccd98485..ad5178e76ff9245ca515fd826ab51907956f8591 100644 --- a/env/Lib/site-packages/pip/_internal/req/req_uninstall.py +++ b/env/Lib/site-packages/pip/_internal/req/req_uninstall.py @@ -1,72 +1,58 @@ -import csv import functools -import logging import os import sys import sysconfig from importlib.util import cache_from_source -from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Set, Tuple - -from pip._vendor import pkg_resources -from pip._vendor.pkg_resources import Distribution +from typing import Any, Callable, Dict, Generator, Iterable, List, Optional, Set, Tuple from pip._internal.exceptions import UninstallationError from pip._internal.locations import get_bin_prefix, get_bin_user +from pip._internal.metadata import BaseDistribution from pip._internal.utils.compat import WINDOWS -from pip._internal.utils.logging import indent_log -from pip._internal.utils.misc import ( - ask, - dist_in_usersite, - dist_is_local, - egg_link_path, - is_local, - normalize_path, - renames, - rmtree, -) +from pip._internal.utils.egg_link import egg_link_path_from_location +from pip._internal.utils.logging import getLogger, indent_log +from pip._internal.utils.misc import ask, normalize_path, renames, rmtree from pip._internal.utils.temp_dir import AdjacentTempDirectory, TempDirectory +from pip._internal.utils.virtualenv import running_under_virtualenv -logger = logging.getLogger(__name__) +logger = getLogger(__name__) -def _script_names(dist, script_name, is_gui): - # type: (Distribution, str, bool) -> List[str] +def _script_names( + bin_dir: str, script_name: str, is_gui: bool +) -> Generator[str, None, None]: """Create the fully qualified name of the files created by {console,gui}_scripts for the given ``dist``. 
Yields the file names. """ - if dist_in_usersite(dist): - bin_dir = get_bin_user() - else: - bin_dir = get_bin_prefix() exe_name = os.path.join(bin_dir, script_name) - paths_to_remove = [exe_name] - if WINDOWS: - paths_to_remove.append(exe_name + '.exe') - paths_to_remove.append(exe_name + '.exe.manifest') - if is_gui: - paths_to_remove.append(exe_name + '-script.pyw') - else: - paths_to_remove.append(exe_name + '-script.py') - return paths_to_remove + yield exe_name + if not WINDOWS: + return + yield f"{exe_name}.exe" + yield f"{exe_name}.exe.manifest" + if is_gui: + yield f"{exe_name}-script.pyw" + else: + yield f"{exe_name}-script.py" -def _unique(fn): - # type: (Callable[..., Iterator[Any]]) -> Callable[..., Iterator[Any]] +def _unique( + fn: Callable[..., Generator[Any, None, None]] +) -> Callable[..., Generator[Any, None, None]]: @functools.wraps(fn) - def unique(*args, **kw): - # type: (Any, Any) -> Iterator[Any] - seen = set() # type: Set[Any] + def unique(*args: Any, **kw: Any) -> Generator[Any, None, None]: + seen: Set[Any] = set() for item in fn(*args, **kw): if item not in seen: seen.add(item) yield item + return unique @_unique -def uninstallation_paths(dist): - # type: (Distribution) -> Iterator[str] +def uninstallation_paths(dist: BaseDistribution) -> Generator[str, None, None]: """ Yield all the uninstallation paths for dist based on RECORD-without-.py[co] @@ -74,33 +60,53 @@ def uninstallation_paths(dist): the .pyc and .pyo in the same directory. UninstallPathSet.add() takes care of the __pycache__ .py[co]. + + If RECORD is not found, raises UninstallationError, + with possible information from the INSTALLER file. + + https://packaging.python.org/specifications/recording-installed-packages/ """ - r = csv.reader(dist.get_metadata_lines('RECORD')) - for row in r: - path = os.path.join(dist.location, row[0]) + location = dist.location + assert location is not None, "not installed" + + entries = dist.iter_declared_entries() + if entries is None: + msg = "Cannot uninstall {dist}, RECORD file not found.".format(dist=dist) + installer = dist.installer + if not installer or installer == "pip": + dep = "{}=={}".format(dist.raw_name, dist.version) + msg += ( + " You might be able to recover from this via: " + "'pip install --force-reinstall --no-deps {}'.".format(dep) + ) + else: + msg += " Hint: The package was installed by {}.".format(installer) + raise UninstallationError(msg) + + for entry in entries: + path = os.path.join(location, entry) yield path - if path.endswith('.py'): + if path.endswith(".py"): dn, fn = os.path.split(path) base = fn[:-3] - path = os.path.join(dn, base + '.pyc') + path = os.path.join(dn, base + ".pyc") yield path - path = os.path.join(dn, base + '.pyo') + path = os.path.join(dn, base + ".pyo") yield path -def compact(paths): - # type: (Iterable[str]) -> Set[str] +def compact(paths: Iterable[str]) -> Set[str]: """Compact a path set to contain the minimal number of paths necessary to contain all paths in the set.
If /a/path/ and /a/path/to/a/file.txt are both in the set, leave only the shorter path.""" sep = os.path.sep - short_paths = set() # type: Set[str] + short_paths: Set[str] = set() for path in sorted(paths, key=len): should_skip = any( - path.startswith(shortpath.rstrip("*")) and - path[len(shortpath.rstrip("*").rstrip(sep))] == sep + path.startswith(shortpath.rstrip("*")) + and path[len(shortpath.rstrip("*").rstrip(sep))] == sep for shortpath in short_paths ) if not should_skip: @@ -108,8 +114,7 @@ def compact(paths): return short_paths -def compress_for_rename(paths): - # type: (Iterable[str]) -> Set[str] +def compress_for_rename(paths: Iterable[str]) -> Set[str]: """Returns a set containing the paths that need to be renamed. This set may include directories when the original sequence of paths @@ -118,25 +123,21 @@ def compress_for_rename(paths): case_map = {os.path.normcase(p): p for p in paths} remaining = set(case_map) unchecked = sorted({os.path.split(p)[0] for p in case_map.values()}, key=len) - wildcards = set() # type: Set[str] + wildcards: Set[str] = set() - def norm_join(*a): - # type: (str) -> str + def norm_join(*a: str) -> str: return os.path.normcase(os.path.join(*a)) for root in unchecked: - if any(os.path.normcase(root).startswith(w) - for w in wildcards): + if any(os.path.normcase(root).startswith(w) for w in wildcards): # This directory has already been handled. continue - all_files = set() # type: Set[str] - all_subdirs = set() # type: Set[str] + all_files: Set[str] = set() + all_subdirs: Set[str] = set() for dirname, subdirs, files in os.walk(root): - all_subdirs.update(norm_join(root, dirname, d) - for d in subdirs) - all_files.update(norm_join(root, dirname, f) - for f in files) + all_subdirs.update(norm_join(root, dirname, d) for d in subdirs) + all_files.update(norm_join(root, dirname, f) for f in files) # If all the files we found are in our remaining set of files to # remove, then remove them from the latter set and add a wildcard # for the directory. @@ -147,8 +148,7 @@ def compress_for_rename(paths): return set(map(case_map.__getitem__, remaining)) | wildcards -def compress_for_output_listing(paths): - # type: (Iterable[str]) -> Tuple[Set[str], Set[str]] +def compress_for_output_listing(paths: Iterable[str]) -> Tuple[Set[str], Set[str]]: """Returns a tuple of 2 sets of which paths to display to user The first set contains paths that would be deleted. Files of a package @@ -186,14 +186,14 @@ def compress_for_output_listing(paths): continue file_ = os.path.join(dirpath, fname) - if (os.path.isfile(file_) and - os.path.normcase(file_) not in _normcased_files): + if ( + os.path.isfile(file_) + and os.path.normcase(file_) not in _normcased_files + ): # We are skipping this file. Add it to the set. will_skip.add(file_) - will_remove = files | { - os.path.join(folder, "*") for folder in folders - } + will_remove = files | {os.path.join(folder, "*") for folder in folders} return will_remove, will_skip @@ -201,32 +201,30 @@ def compress_for_output_listing(paths): class StashedUninstallPathSet: """A set of file rename operations to stash files while tentatively uninstalling them.""" - def __init__(self): - # type: () -> None + + def __init__(self) -> None: # Mapping from source file root to [Adjacent]TempDirectory # for files under that directory. - self._save_dirs = {} # type: Dict[str, TempDirectory] + self._save_dirs: Dict[str, TempDirectory] = {} # (old path, new path) tuples for each move that may need # to be undone. 
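# (The (old, new) pairs recorded here let rollback() rename each stashed file back into place.)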
- self._moves = [] # type: List[Tuple[str, str]] + self._moves: List[Tuple[str, str]] = [] - def _get_directory_stash(self, path): - # type: (str) -> str + def _get_directory_stash(self, path: str) -> str: """Stashes a directory. Directories are stashed adjacent to their original location if possible, or else moved/copied into the user's temp dir.""" try: - save_dir = AdjacentTempDirectory(path) # type: TempDirectory + save_dir: TempDirectory = AdjacentTempDirectory(path) except OSError: save_dir = TempDirectory(kind="uninstall") self._save_dirs[os.path.normcase(path)] = save_dir return save_dir.path - def _get_file_stash(self, path): - # type: (str) -> str + def _get_file_stash(self, path: str) -> str: """Stashes a file. If no root has been provided, one will be created for the directory @@ -245,7 +243,7 @@ class StashedUninstallPathSet: else: # Did not find any suitable root head = os.path.dirname(path) - save_dir = TempDirectory(kind='uninstall') + save_dir = TempDirectory(kind="uninstall") self._save_dirs[head] = save_dir relpath = os.path.relpath(path, head) @@ -253,8 +251,7 @@ class StashedUninstallPathSet: return os.path.join(save_dir.path, relpath) return save_dir.path - def stash(self, path): - # type: (str) -> str + def stash(self, path: str) -> str: """Stashes the directory or file and returns its new location. Handle symlinks as files to avoid modifying the symlink targets. """ @@ -265,7 +262,7 @@ class StashedUninstallPathSet: new_path = self._get_file_stash(path) self._moves.append((path, new_path)) - if (path_is_dir and os.path.isdir(new_path)): + if path_is_dir and os.path.isdir(new_path): # If we're moving a directory, we need to # remove the destination first or else it will be # moved to inside the existing directory. @@ -275,23 +272,21 @@ class StashedUninstallPathSet: renames(path, new_path) return new_path - def commit(self): - # type: () -> None + def commit(self) -> None: """Commits the uninstall by removing stashed files.""" for _, save_dir in self._save_dirs.items(): save_dir.cleanup() self._moves = [] self._save_dirs = {} - def rollback(self): - # type: () -> None + def rollback(self) -> None: """Undoes the uninstall by moving stashed files back.""" for p in self._moves: logger.info("Moving to %s\n from %s", *p) for new_path, path in self._moves: try: - logger.debug('Replacing %s from %s', new_path, path) + logger.debug("Replacing %s from %s", new_path, path) if os.path.isfile(new_path) or os.path.islink(new_path): os.unlink(new_path) elif os.path.isdir(new_path): @@ -304,100 +299,97 @@ class StashedUninstallPathSet: self.commit() @property - def can_rollback(self): - # type: () -> bool + def can_rollback(self) -> bool: return bool(self._moves) class UninstallPathSet: """A set of file paths to be removed in the uninstallation of a requirement.""" - def __init__(self, dist): - # type: (Distribution) -> None - self.paths = set() # type: Set[str] - self._refuse = set() # type: Set[str] - self.pth = {} # type: Dict[str, UninstallPthEntries] - self.dist = dist + + def __init__(self, dist: BaseDistribution) -> None: + self._paths: Set[str] = set() + self._refuse: Set[str] = set() + self._pth: Dict[str, UninstallPthEntries] = {} + self._dist = dist self._moved_paths = StashedUninstallPathSet() + # Create local cache of normalize_path results. Creating an UninstallPathSet + # can result in hundreds/thousands of redundant calls to normalize_path with + # the same args, which hurts performance. 
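+ # (Illustrative: functools.lru_cache()(normalize_path) returns a memoized wrapper, so repeated adds under the same directory tree resolve each distinct path only once.)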
+ self._normalize_path_cached = functools.lru_cache()(normalize_path) - def _permitted(self, path): - # type: (str) -> bool + def _permitted(self, path: str) -> bool: """ Return True if the given path is one we are permitted to remove/modify, False otherwise. """ - return is_local(path) + # aka is_local, but caching normalized sys.prefix + if not running_under_virtualenv(): + return True + return path.startswith(self._normalize_path_cached(sys.prefix)) - def add(self, path): - # type: (str) -> None + def add(self, path: str) -> None: head, tail = os.path.split(path) # we normalize the head to resolve parent directory symlinks, but not # the tail, since we only want to uninstall symlinks, not their targets - path = os.path.join(normalize_path(head), os.path.normcase(tail)) + path = os.path.join(self._normalize_path_cached(head), os.path.normcase(tail)) if not os.path.exists(path): return if self._permitted(path): - self.paths.add(path) + self._paths.add(path) else: self._refuse.add(path) # __pycache__ files can show up after 'installed-files.txt' is created, # due to imports - if os.path.splitext(path)[1] == '.py': + if os.path.splitext(path)[1] == ".py": self.add(cache_from_source(path)) - def add_pth(self, pth_file, entry): - # type: (str, str) -> None - pth_file = normalize_path(pth_file) + def add_pth(self, pth_file: str, entry: str) -> None: + pth_file = self._normalize_path_cached(pth_file) if self._permitted(pth_file): - if pth_file not in self.pth: - self.pth[pth_file] = UninstallPthEntries(pth_file) - self.pth[pth_file].add(entry) + if pth_file not in self._pth: + self._pth[pth_file] = UninstallPthEntries(pth_file) + self._pth[pth_file].add(entry) else: self._refuse.add(pth_file) - def remove(self, auto_confirm=False, verbose=False): - # type: (bool, bool) -> None - """Remove paths in ``self.paths`` with confirmation (unless + def remove(self, auto_confirm: bool = False, verbose: bool = False) -> None: + """Remove paths in ``self._paths`` with confirmation (unless ``auto_confirm`` is True).""" - if not self.paths: + if not self._paths: logger.info( "Can't uninstall '%s'. 
No files were found to uninstall.", - self.dist.project_name, + self._dist.raw_name, ) return - dist_name_version = ( - self.dist.project_name + "-" + self.dist.version - ) - logger.info('Uninstalling %s:', dist_name_version) + dist_name_version = f"{self._dist.raw_name}-{self._dist.version}" + logger.info("Uninstalling %s:", dist_name_version) with indent_log(): if auto_confirm or self._allowed_to_proceed(verbose): moved = self._moved_paths - for_rename = compress_for_rename(self.paths) + for_rename = compress_for_rename(self._paths) for path in sorted(compact(for_rename)): moved.stash(path) - logger.debug('Removing file or directory %s', path) + logger.verbose("Removing file or directory %s", path) - for pth in self.pth.values(): + for pth in self._pth.values(): pth.remove() - logger.info('Successfully uninstalled %s', dist_name_version) + logger.info("Successfully uninstalled %s", dist_name_version) - def _allowed_to_proceed(self, verbose): - # type: (bool) -> bool - """Display which files would be deleted and prompt for confirmation - """ + def _allowed_to_proceed(self, verbose: bool) -> bool: + """Display which files would be deleted and prompt for confirmation""" - def _display(msg, paths): - # type: (str, Iterable[str]) -> None + def _display(msg: str, paths: Iterable[str]) -> None: if not paths: return @@ -407,182 +399,206 @@ class UninstallPathSet: logger.info(path) if not verbose: - will_remove, will_skip = compress_for_output_listing(self.paths) + will_remove, will_skip = compress_for_output_listing(self._paths) else: # In verbose mode, display all the files that are going to be # deleted. - will_remove = set(self.paths) + will_remove = set(self._paths) will_skip = set() - _display('Would remove:', will_remove) - _display('Would not remove (might be manually added):', will_skip) - _display('Would not remove (outside of prefix):', self._refuse) + _display("Would remove:", will_remove) + _display("Would not remove (might be manually added):", will_skip) + _display("Would not remove (outside of prefix):", self._refuse) if verbose: - _display('Will actually move:', compress_for_rename(self.paths)) + _display("Will actually move:", compress_for_rename(self._paths)) - return ask('Proceed (y/n)? ', ('y', 'n')) == 'y' + return ask("Proceed (Y/n)? 
", ("y", "n", "")) != "n" - def rollback(self): - # type: () -> None + def rollback(self) -> None: """Rollback the changes previously made by remove().""" if not self._moved_paths.can_rollback: logger.error( "Can't roll back %s; was not uninstalled", - self.dist.project_name, + self._dist.raw_name, ) return - logger.info('Rolling back uninstall of %s', self.dist.project_name) + logger.info("Rolling back uninstall of %s", self._dist.raw_name) self._moved_paths.rollback() - for pth in self.pth.values(): + for pth in self._pth.values(): pth.rollback() - def commit(self): - # type: () -> None + def commit(self) -> None: """Remove temporary save dir: rollback will no longer be possible.""" self._moved_paths.commit() @classmethod - def from_dist(cls, dist): - # type: (Distribution) -> UninstallPathSet - dist_path = normalize_path(dist.location) - if not dist_is_local(dist): + def from_dist(cls, dist: BaseDistribution) -> "UninstallPathSet": + dist_location = dist.location + info_location = dist.info_location + if dist_location is None: + logger.info( + "Not uninstalling %s since it is not installed", + dist.canonical_name, + ) + return cls(dist) + + normalized_dist_location = normalize_path(dist_location) + if not dist.local: logger.info( "Not uninstalling %s at %s, outside environment %s", - dist.key, - dist_path, + dist.canonical_name, + normalized_dist_location, sys.prefix, ) return cls(dist) - if dist_path in {p for p in {sysconfig.get_path("stdlib"), - sysconfig.get_path("platstdlib")} - if p}: + if normalized_dist_location in { + p + for p in {sysconfig.get_path("stdlib"), sysconfig.get_path("platstdlib")} + if p + }: logger.info( "Not uninstalling %s at %s, as it is in the standard library.", - dist.key, - dist_path, + dist.canonical_name, + normalized_dist_location, ) return cls(dist) paths_to_remove = cls(dist) - develop_egg_link = egg_link_path(dist) - develop_egg_link_egg_info = '{}.egg-info'.format( - pkg_resources.to_filename(dist.project_name)) - egg_info_exists = dist.egg_info and os.path.exists(dist.egg_info) - # Special case for distutils installed package - distutils_egg_info = getattr(dist._provider, 'path', None) + develop_egg_link = egg_link_path_from_location(dist.raw_name) + + # Distribution is installed with metadata in a "flat" .egg-info + # directory. This means it is not a modern .dist-info installation, an + # egg, or legacy editable. + setuptools_flat_installation = ( + dist.installed_with_setuptools_egg_info + and info_location is not None + and os.path.exists(info_location) + # If dist is editable and the location points to a ``.egg-info``, + # we are in fact in the legacy editable case. 
+ and not info_location.endswith(f"{dist.setuptools_filename}.egg-info") + ) # Uninstall cases order do matter as in the case of 2 installs of the # same package, pip needs to uninstall the currently detected version - if (egg_info_exists and dist.egg_info.endswith('.egg-info') and - not dist.egg_info.endswith(develop_egg_link_egg_info)): - # if dist.egg_info.endswith(develop_egg_link_egg_info), we - # are in fact in the develop_egg_link case - paths_to_remove.add(dist.egg_info) - if dist.has_metadata('installed-files.txt'): - for installed_file in dist.get_metadata( - 'installed-files.txt').splitlines(): - path = os.path.normpath( - os.path.join(dist.egg_info, installed_file) - ) - paths_to_remove.add(path) + if setuptools_flat_installation: + if info_location is not None: + paths_to_remove.add(info_location) + installed_files = dist.iter_declared_entries() + if installed_files is not None: + for installed_file in installed_files: + paths_to_remove.add(os.path.join(dist_location, installed_file)) # FIXME: need a test for this elif block # occurs with --single-version-externally-managed/--record outside # of pip - elif dist.has_metadata('top_level.txt'): - if dist.has_metadata('namespace_packages.txt'): - namespaces = dist.get_metadata('namespace_packages.txt') - else: + elif dist.is_file("top_level.txt"): + try: + namespace_packages = dist.read_text("namespace_packages.txt") + except FileNotFoundError: namespaces = [] + else: + namespaces = namespace_packages.splitlines(keepends=False) for top_level_pkg in [ - p for p - in dist.get_metadata('top_level.txt').splitlines() - if p and p not in namespaces]: - path = os.path.join(dist.location, top_level_pkg) + p + for p in dist.read_text("top_level.txt").splitlines() + if p and p not in namespaces + ]: + path = os.path.join(dist_location, top_level_pkg) paths_to_remove.add(path) - paths_to_remove.add(path + '.py') - paths_to_remove.add(path + '.pyc') - paths_to_remove.add(path + '.pyo') + paths_to_remove.add(f"{path}.py") + paths_to_remove.add(f"{path}.pyc") + paths_to_remove.add(f"{path}.pyo") - elif distutils_egg_info: + elif dist.installed_by_distutils: raise UninstallationError( "Cannot uninstall {!r}. It is a distutils installed project " "and thus we cannot accurately determine which files belong " "to it which would lead to only a partial uninstall.".format( - dist.project_name, + dist.raw_name, ) ) - elif dist.location.endswith('.egg'): + elif dist.installed_as_egg: # package installed by easy_install # We cannot match on dist.egg_name because it can slightly vary # i.e. setuptools-0.6c11-py2.6.egg vs setuptools-0.6rc11-py2.6.egg - paths_to_remove.add(dist.location) - easy_install_egg = os.path.split(dist.location)[1] - easy_install_pth = os.path.join(os.path.dirname(dist.location), - 'easy-install.pth') - paths_to_remove.add_pth(easy_install_pth, './' + easy_install_egg) + paths_to_remove.add(dist_location) + easy_install_egg = os.path.split(dist_location)[1] + easy_install_pth = os.path.join( + os.path.dirname(dist_location), + "easy-install.pth", + ) + paths_to_remove.add_pth(easy_install_pth, "./" + easy_install_egg) - elif egg_info_exists and dist.egg_info.endswith('.dist-info'): + elif dist.installed_with_dist_info: for path in uninstallation_paths(dist): paths_to_remove.add(path) elif develop_egg_link: - # develop egg + # PEP 660 modern editable is handled in the ``.dist-info`` case + # above, so this only covers the setuptools-style editable. 
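+ # (An egg-link file, written by setuptools develop mode, holds the project directory path on its first line; that pointer is read and validated below.)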
with open(develop_egg_link) as fh: link_pointer = os.path.normcase(fh.readline().strip()) - assert (link_pointer == dist.location), ( - 'Egg-link {} does not match installed location of {} ' - '(at {})'.format( - link_pointer, dist.project_name, dist.location) + normalized_link_pointer = paths_to_remove._normalize_path_cached( + link_pointer + ) + assert os.path.samefile( + normalized_link_pointer, normalized_dist_location + ), ( + f"Egg-link {develop_egg_link} (to {link_pointer}) does not match " + f"installed location of {dist.raw_name} (at {dist_location})" ) paths_to_remove.add(develop_egg_link) - easy_install_pth = os.path.join(os.path.dirname(develop_egg_link), - 'easy-install.pth') - paths_to_remove.add_pth(easy_install_pth, dist.location) + easy_install_pth = os.path.join( + os.path.dirname(develop_egg_link), "easy-install.pth" + ) + paths_to_remove.add_pth(easy_install_pth, dist_location) else: logger.debug( - 'Not sure how to uninstall: %s - Check: %s', - dist, dist.location, + "Not sure how to uninstall: %s - Check: %s", + dist, + dist_location, ) + if dist.in_usersite: + bin_dir = get_bin_user() + else: + bin_dir = get_bin_prefix() + # find distutils scripts= scripts - if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'): - for script in dist.metadata_listdir('scripts'): - if dist_in_usersite(dist): - bin_dir = get_bin_user() - else: - bin_dir = get_bin_prefix() + try: + for script in dist.iter_distutils_script_names(): paths_to_remove.add(os.path.join(bin_dir, script)) if WINDOWS: - paths_to_remove.add(os.path.join(bin_dir, script) + '.bat') - - # find console_scripts - _scripts_to_remove = [] - console_scripts = dist.get_entry_map(group='console_scripts') - for name in console_scripts.keys(): - _scripts_to_remove.extend(_script_names(dist, name, False)) - # find gui_scripts - gui_scripts = dist.get_entry_map(group='gui_scripts') - for name in gui_scripts.keys(): - _scripts_to_remove.extend(_script_names(dist, name, True)) - - for s in _scripts_to_remove: + paths_to_remove.add(os.path.join(bin_dir, f"{script}.bat")) + except (FileNotFoundError, NotADirectoryError): + pass + + # find console_scripts and gui_scripts + def iter_scripts_to_remove( + dist: BaseDistribution, + bin_dir: str, + ) -> Generator[str, None, None]: + for entry_point in dist.iter_entry_points(): + if entry_point.group == "console_scripts": + yield from _script_names(bin_dir, entry_point.name, False) + elif entry_point.group == "gui_scripts": + yield from _script_names(bin_dir, entry_point.name, True) + + for s in iter_scripts_to_remove(dist, bin_dir): paths_to_remove.add(s) return paths_to_remove class UninstallPthEntries: - def __init__(self, pth_file): - # type: (str) -> None + def __init__(self, pth_file: str) -> None: self.file = pth_file - self.entries = set() # type: Set[str] - self._saved_lines = None # type: Optional[List[bytes]] + self.entries: Set[str] = set() + self._saved_lines: Optional[List[bytes]] = None - def add(self, entry): - # type: (str) -> None + def add(self, entry: str) -> None: entry = os.path.normcase(entry) # On Windows, os.path.normcase converts the entry to use # backslashes. This is correct for entries that describe absolute @@ -594,47 +610,41 @@ class UninstallPthEntries: # have more than "\\server\share". Valid examples: "\\server\share\" or # "\\server\share\folder".
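# (e.g. a UNC entry like "\\server\share\folder" keeps its backslashes because os.path.splitdrive() reports a drive for it, while a drive-less entry such as "foo\bar" is rewritten to "foo/bar".)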
if WINDOWS and not os.path.splitdrive(entry)[0]: - entry = entry.replace('\\', '/') + entry = entry.replace("\\", "/") self.entries.add(entry) - def remove(self): - # type: () -> None - logger.debug('Removing pth entries from %s:', self.file) + def remove(self) -> None: + logger.verbose("Removing pth entries from %s:", self.file) # If the file doesn't exist, log a warning and return if not os.path.isfile(self.file): - logger.warning( - "Cannot remove entries from nonexistent file %s", self.file - ) + logger.warning("Cannot remove entries from nonexistent file %s", self.file) return - with open(self.file, 'rb') as fh: + with open(self.file, "rb") as fh: # windows uses '\r\n' with py3k, but uses '\n' with py2.x lines = fh.readlines() self._saved_lines = lines - if any(b'\r\n' in line for line in lines): - endline = '\r\n' + if any(b"\r\n" in line for line in lines): + endline = "\r\n" else: - endline = '\n' + endline = "\n" # handle missing trailing newline if lines and not lines[-1].endswith(endline.encode("utf-8")): lines[-1] = lines[-1] + endline.encode("utf-8") for entry in self.entries: try: - logger.debug('Removing entry: %s', entry) + logger.verbose("Removing entry: %s", entry) lines.remove((entry + endline).encode("utf-8")) except ValueError: pass - with open(self.file, 'wb') as fh: + with open(self.file, "wb") as fh: fh.writelines(lines) - def rollback(self): - # type: () -> bool + def rollback(self) -> bool: if self._saved_lines is None: - logger.error( - 'Cannot roll back changes to %s, none were made', self.file - ) + logger.error("Cannot roll back changes to %s, none were made", self.file) return False - logger.debug('Rolling %s back to previous state', self.file) - with open(self.file, 'wb') as fh: + logger.debug("Rolling %s back to previous state", self.file) + with open(self.file, "wb") as fh: fh.writelines(self._saved_lines) return True diff --git a/env/Lib/site-packages/pip/_internal/resolution/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/resolution/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 67cb3e32a8e33f9b46accd2807416b3b289d311a..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/resolution/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/resolution/__pycache__/base.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/resolution/__pycache__/base.cpython-39.pyc deleted file mode 100644 index 2c19d10d12f8efa3a2b94e9a2a325a3488bcb2c6..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/resolution/__pycache__/base.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/resolution/base.py b/env/Lib/site-packages/pip/_internal/resolution/base.py index 1be0cb279a0e989acddb98382a12c4f792ba0c5b..42dade18c1ec2b825f756dad4aaa89f2d9e6ce21 100644 --- a/env/Lib/site-packages/pip/_internal/resolution/base.py +++ b/env/Lib/site-packages/pip/_internal/resolution/base.py @@ -1,16 +1,20 @@ -from typing import Callable, List +from typing import Callable, List, Optional from pip._internal.req.req_install import InstallRequirement from pip._internal.req.req_set import RequirementSet -InstallRequirementProvider = Callable[[str, InstallRequirement], InstallRequirement] +InstallRequirementProvider = Callable[ + [str, Optional[InstallRequirement]], InstallRequirement +] class BaseResolver: - def resolve(self, root_reqs, check_supported_wheels): - # type: (List[InstallRequirement], 
bool) -> RequirementSet + def resolve( + self, root_reqs: List[InstallRequirement], check_supported_wheels: bool + ) -> RequirementSet: raise NotImplementedError() - def get_installation_order(self, req_set): - # type: (RequirementSet) -> List[InstallRequirement] + def get_installation_order( + self, req_set: RequirementSet + ) -> List[InstallRequirement]: raise NotImplementedError() diff --git a/env/Lib/site-packages/pip/_internal/resolution/legacy/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/resolution/legacy/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index ea3a4b2153a625cdb95fd39d873f5ed338e4668a..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/resolution/legacy/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/resolution/legacy/__pycache__/resolver.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/resolution/legacy/__pycache__/resolver.cpython-39.pyc deleted file mode 100644 index 5af41912d9c3ae6880175189d955c46494e0023b..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/resolution/legacy/__pycache__/resolver.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/resolution/legacy/resolver.py b/env/Lib/site-packages/pip/_internal/resolution/legacy/resolver.py index 17de7f09a37b0dca9c34e40b5e8985b653af2d99..b17b7e4530b185a4011f4dc3211ddedd6d6587aa 100644 --- a/env/Lib/site-packages/pip/_internal/resolution/legacy/resolver.py +++ b/env/Lib/site-packages/pip/_internal/resolution/legacy/resolver.py @@ -20,7 +20,7 @@ from itertools import chain from typing import DefaultDict, Iterable, List, Optional, Set, Tuple from pip._vendor.packaging import specifiers -from pip._vendor.pkg_resources import Distribution +from pip._vendor.packaging.requirements import Requirement from pip._internal.cache import WheelCache from pip._internal.exceptions import ( @@ -28,10 +28,14 @@ from pip._internal.exceptions import ( DistributionNotFound, HashError, HashErrors, + InstallationError, + NoneMetadataError, UnsupportedPythonVersion, ) from pip._internal.index.package_finder import PackageFinder +from pip._internal.metadata import BaseDistribution from pip._internal.models.link import Link +from pip._internal.models.wheel import Wheel from pip._internal.operations.prepare import RequirementPreparer from pip._internal.req.req_install import ( InstallRequirement, @@ -39,10 +43,12 @@ from pip._internal.req.req_install import ( ) from pip._internal.req.req_set import RequirementSet from pip._internal.resolution.base import BaseResolver, InstallRequirementProvider +from pip._internal.utils import compatibility_tags from pip._internal.utils.compatibility_tags import get_supported +from pip._internal.utils.direct_url_helpers import direct_url_from_link from pip._internal.utils.logging import indent_log -from pip._internal.utils.misc import dist_in_usersite, normalize_version_info -from pip._internal.utils.packaging import check_requires_python, get_requires_python +from pip._internal.utils.misc import normalize_version_info +from pip._internal.utils.packaging import check_requires_python logger = logging.getLogger(__name__) @@ -50,11 +56,10 @@ DiscoveredDependencies = DefaultDict[str, List[InstallRequirement]] def _check_dist_requires_python( - dist, # type: Distribution - version_info, # type: Tuple[int, int, int] - ignore_requires_python=False, # type: bool -): - # type: (...) 
-> None + dist: BaseDistribution, + version_info: Tuple[int, int, int], + ignore_requires_python: bool = False, +) -> None: """ Check whether the given Python version is compatible with a distribution's "Requires-Python" value. @@ -67,14 +72,21 @@ def _check_dist_requires_python( :raises UnsupportedPythonVersion: When the given Python version isn't compatible. """ - requires_python = get_requires_python(dist) + # This idiosyncratically converts the SpecifierSet to str and lets + # check_requires_python then parse it again into SpecifierSet. But this + # is the legacy resolver so I'm just not going to bother refactoring. + try: + requires_python = str(dist.requires_python) + except FileNotFoundError as e: + raise NoneMetadataError(dist, str(e)) try: is_compatible = check_requires_python( - requires_python, version_info=version_info + requires_python, + version_info=version_info, ) except specifiers.InvalidSpecifier as exc: logger.warning( - "Package %r has an invalid Requires-Python: %s", dist.project_name, exc + "Package %r has an invalid Requires-Python: %s", dist.raw_name, exc ) return @@ -84,8 +96,8 @@ def _check_dist_requires_python( version = ".".join(map(str, version_info)) if ignore_requires_python: logger.debug( - "Ignoring failed Requires-Python check for package %r: " "%s not in %r", - dist.project_name, + "Ignoring failed Requires-Python check for package %r: %s not in %r", + dist.raw_name, version, requires_python, ) @@ -93,7 +105,7 @@ raise UnsupportedPythonVersion( "Package {!r} requires a different Python: {} not in {!r}".format( - dist.project_name, version, requires_python + dist.raw_name, version, requires_python ) ) @@ -107,19 +119,18 @@ class Resolver(BaseResolver): def __init__( self, - preparer, # type: RequirementPreparer - finder, # type: PackageFinder - wheel_cache, # type: Optional[WheelCache] - make_install_req, # type: InstallRequirementProvider - use_user_site, # type: bool - ignore_dependencies, # type: bool - ignore_installed, # type: bool - ignore_requires_python, # type: bool - force_reinstall, # type: bool - upgrade_strategy, # type: str - py_version_info=None, # type: Optional[Tuple[int, ...]] - ): - # type: (...) 
-> None + preparer: RequirementPreparer, + finder: PackageFinder, + wheel_cache: Optional[WheelCache], + make_install_req: InstallRequirementProvider, + use_user_site: bool, + ignore_dependencies: bool, + ignore_installed: bool, + ignore_requires_python: bool, + force_reinstall: bool, + upgrade_strategy: str, + py_version_info: Optional[Tuple[int, ...]] = None, + ) -> None: super().__init__() assert upgrade_strategy in self._allowed_strategies @@ -142,12 +153,11 @@ class Resolver(BaseResolver): self.use_user_site = use_user_site self._make_install_req = make_install_req - self._discovered_dependencies = defaultdict( - list - ) # type: DiscoveredDependencies + self._discovered_dependencies: DiscoveredDependencies = defaultdict(list) - def resolve(self, root_reqs, check_supported_wheels): - # type: (List[InstallRequirement], bool) -> RequirementSet + def resolve( + self, root_reqs: List[InstallRequirement], check_supported_wheels: bool + ) -> RequirementSet: """Resolve what operations need to be done As a side-effect of this method, the packages (and their dependencies) @@ -162,13 +172,13 @@ class Resolver(BaseResolver): for req in root_reqs: if req.constraint: check_invalid_constraint_type(req) - requirement_set.add_requirement(req) + self._add_requirement_to_set(requirement_set, req) # Actually prepare the files, and collect any exceptions. Most hash # exceptions cannot be checked ahead of time, because # _populate_link() needs to be called before we can make decisions # based on link type. - discovered_reqs = [] # type: List[InstallRequirement] + discovered_reqs: List[InstallRequirement] = [] hash_errors = HashErrors() for req in chain(requirement_set.all_requirements, discovered_reqs): try: @@ -182,8 +192,125 @@ class Resolver(BaseResolver): return requirement_set - def _is_upgrade_allowed(self, req): - # type: (InstallRequirement) -> bool + def _add_requirement_to_set( + self, + requirement_set: RequirementSet, + install_req: InstallRequirement, + parent_req_name: Optional[str] = None, + extras_requested: Optional[Iterable[str]] = None, + ) -> Tuple[List[InstallRequirement], Optional[InstallRequirement]]: + """Add install_req as a requirement to install. + + :param parent_req_name: The name of the requirement that needed this + added. The name is used because when multiple unnamed requirements + resolve to the same name, we could otherwise end up with dependency + links that point outside the Requirements set. parent_req must + already be added. Note that None implies that this is a user + supplied requirement, vs an inferred one. + :param extras_requested: an iterable of extras used to evaluate the + environment markers. + :return: Additional requirements to scan. That is either [] if + the requirement is not applicable, or [install_req] if the + requirement is applicable and has just been added. + """ + # If the markers do not match, ignore this requirement. + if not install_req.match_markers(extras_requested): + logger.info( + "Ignoring %s: markers '%s' don't match your environment", + install_req.name, + install_req.markers, + ) + return [], None + + # If the wheel is not supported, raise an error. + # Should check this after filtering out based on environment markers to + # allow specifying different wheels based on the environment/OS, in a + # single requirements file. 
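
Illustrative aside, not part of the patch: a minimal sketch of the ordering the comment above describes, written against the standalone packaging library rather than pip internals (the helper name is hypothetical). Evaluating the environment marker first means a platform-specific wheel line in a shared requirements file is skipped on other platforms instead of failing the wheel-support check.

from packaging.markers import Marker
from packaging.tags import sys_tags
from packaging.utils import parse_wheel_filename

def is_applicable(marker: str, wheel_filename: str) -> bool:
    # 1. Marker filter: lines whose marker doesn't match this environment
    #    are dropped here and never reach the tag check.
    if marker and not Marker(marker).evaluate():
        return False
    # 2. Wheel-support check, only for lines that survived the marker.
    _name, _version, _build, tags = parse_wheel_filename(wheel_filename)
    return not tags.isdisjoint(sys_tags())

# e.g. is_applicable("sys_platform == 'win32'", "demo-1.0-cp39-cp39-win_amd64.whl")
# is False on Linux because of step 1, not a hard "unsupported wheel" error.
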
+ if install_req.link and install_req.link.is_wheel: + wheel = Wheel(install_req.link.filename) + tags = compatibility_tags.get_supported() + if requirement_set.check_supported_wheels and not wheel.supported(tags): + raise InstallationError( + "{} is not a supported wheel on this platform.".format( + wheel.filename + ) + ) + + # This next bit is really a sanity check. + assert ( + not install_req.user_supplied or parent_req_name is None + ), "a user supplied req shouldn't have a parent" + + # Unnamed requirements are scanned again and the requirement won't be + # added as a dependency until after scanning. + if not install_req.name: + requirement_set.add_unnamed_requirement(install_req) + return [install_req], None + + try: + existing_req: Optional[ + InstallRequirement + ] = requirement_set.get_requirement(install_req.name) + except KeyError: + existing_req = None + + has_conflicting_requirement = ( + parent_req_name is None + and existing_req + and not existing_req.constraint + and existing_req.extras == install_req.extras + and existing_req.req + and install_req.req + and existing_req.req.specifier != install_req.req.specifier + ) + if has_conflicting_requirement: + raise InstallationError( + "Double requirement given: {} (already in {}, name={!r})".format( + install_req, existing_req, install_req.name + ) + ) + + # When no existing requirement exists, add the requirement as a + # dependency and it will be scanned again after. + if not existing_req: + requirement_set.add_named_requirement(install_req) + # We'd want to rescan this requirement later + return [install_req], install_req + + # Assume there's no need to scan, and that we've already + # encountered this for scanning. + if install_req.constraint or not existing_req.constraint: + return [], existing_req + + does_not_satisfy_constraint = install_req.link and not ( + existing_req.link and install_req.link.path == existing_req.link.path + ) + if does_not_satisfy_constraint: + raise InstallationError( + "Could not satisfy constraints for '{}': " + "installation from path or url cannot be " + "constrained to a version".format(install_req.name) + ) + # If we're now installing a constraint, mark the existing + # object for real installation. + existing_req.constraint = False + # If we're now installing a user supplied requirement, + # mark the existing object as such. + if install_req.user_supplied: + existing_req.user_supplied = True + existing_req.extras = tuple( + sorted(set(existing_req.extras) | set(install_req.extras)) + ) + logger.debug( + "Setting %s extras to: %s", + existing_req, + existing_req.extras, + ) + # Return the existing requirement for addition to the parent and + # scanning again. + return [existing_req], existing_req + + def _is_upgrade_allowed(self, req: InstallRequirement) -> bool: if self.upgrade_strategy == "to-satisfy-only": return False elif self.upgrade_strategy == "eager": @@ -192,19 +319,19 @@ class Resolver(BaseResolver): assert self.upgrade_strategy == "only-if-needed" return req.user_supplied or req.constraint - def _set_req_to_reinstall(self, req): - # type: (InstallRequirement) -> None + def _set_req_to_reinstall(self, req: InstallRequirement) -> None: """ Set a requirement to be installed. """ # Don't uninstall the conflict if doing a user install and the # conflict is not a user install. 
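
Illustrative aside, not part of the patch: the guard implemented just below reduces to a two-input predicate; a sketch with hypothetical names.

def should_mark_for_reinstall(use_user_site: bool, conflict_in_usersite: bool) -> bool:
    # A global install may always replace the conflicting distribution.
    # A --user install must not uninstall a global-site conflict, so it only
    # marks the conflict when that conflict also lives in the user site.
    return (not use_user_site) or conflict_in_usersite
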
- if not self.use_user_site or dist_in_usersite(req.satisfied_by): + if not self.use_user_site or req.satisfied_by.in_usersite: req.should_reinstall = True req.satisfied_by = None - def _check_skip_installed(self, req_to_install): - # type: (InstallRequirement) -> Optional[str] + def _check_skip_installed( + self, req_to_install: InstallRequirement + ) -> Optional[str]: """Check if req_to_install should be skipped. This will check if the req is installed, and whether we should upgrade @@ -256,8 +383,7 @@ class Resolver(BaseResolver): self._set_req_to_reinstall(req_to_install) return None - def _find_requirement_link(self, req): - # type: (InstallRequirement) -> Optional[Link] + def _find_requirement_link(self, req: InstallRequirement) -> Optional[Link]: upgrade = self._is_upgrade_allowed(req) best_candidate = self.finder.find_requirement(req, upgrade) if not best_candidate: @@ -279,8 +405,7 @@ return link - def _populate_link(self, req): - # type: (InstallRequirement) -> None + def _populate_link(self, req: InstallRequirement) -> None: """Ensure that if a link can be found for this, that it is found. Note that req.link may still be None - if the requirement is already @@ -306,11 +431,18 @@ if cache_entry is not None: logger.debug("Using cached wheel link: %s", cache_entry.link) if req.link is req.original_link and cache_entry.persistent: - req.original_link_is_in_wheel_cache = True + req.cached_wheel_source_link = req.link + if cache_entry.origin is not None: + req.download_info = cache_entry.origin + else: + # Legacy cache entry that does not have origin.json. + # download_info may miss the archive_info.hashes field. + req.download_info = direct_url_from_link( + req.link, link_is_in_wheel_cache=cache_entry.persistent + ) req.link = cache_entry.link - def _get_dist_for(self, req): - # type: (InstallRequirement) -> Distribution + def _get_dist_for(self, req: InstallRequirement) -> BaseDistribution: """Takes a InstallRequirement and returns a single AbstractDist \ representing a prepared variant of the same. """ @@ -351,17 +483,16 @@ class Resolver(BaseResolver): self._set_req_to_reinstall(req) else: logger.info( - "Requirement already satisfied (use --upgrade to upgrade):" " %s", + "Requirement already satisfied (use --upgrade to upgrade): %s", req, ) return dist def _resolve_one( self, - requirement_set, # type: RequirementSet - req_to_install, # type: InstallRequirement - ): - # type: (...) -> List[InstallRequirement] + requirement_set: RequirementSet, + req_to_install: InstallRequirement, + ) -> List[InstallRequirement]: """Prepare a single requirements file. :return: A list of additional InstallRequirements to also install. @@ -384,16 +515,16 @@ ignore_requires_python=self.ignore_requires_python, ) - more_reqs = [] # type: List[InstallRequirement] + more_reqs: List[InstallRequirement] = [] - def add_req(subreq, extras_requested): - # type: (Distribution, Iterable[str]) -> None - sub_install_req = self._make_install_req( - str(subreq), - req_to_install, - ) + def add_req(subreq: Requirement, extras_requested: Iterable[str]) -> None: + # This idiosyncratically converts the Requirement to str and lets + # make_install_req then parse it again into Requirement. But this is + # the legacy resolver so I'm just not going to bother refactoring. 
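
Illustrative aside, not part of the patch: the str round trip mentioned in the comment above, demonstrated with the standalone packaging library (pip vendors the same library).

from packaging.requirements import Requirement

subreq = Requirement("urllib3>=1.21.1,<1.27; python_version >= '3.6'")
line = str(subreq)            # serialised back to a PEP 508 string...
reparsed = Requirement(line)  # ...and parsed again on the other side
assert reparsed.name == subreq.name
assert str(reparsed.specifier) == str(subreq.specifier)
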
+ sub_install_req = self._make_install_req(str(subreq), req_to_install) parent_req_name = req_to_install.name - to_scan_again, add_to_parent = requirement_set.add_requirement( + to_scan_again, add_to_parent = self._add_requirement_to_set( + requirement_set, sub_install_req, parent_req_name=parent_req_name, extras_requested=extras_requested, @@ -410,7 +541,9 @@ class Resolver(BaseResolver): # 'unnamed' requirements can only come from being directly # provided by the user. assert req_to_install.user_supplied - requirement_set.add_requirement(req_to_install, parent_req_name=None) + self._add_requirement_to_set( + requirement_set, req_to_install, parent_req_name=None + ) if not self.ignore_dependencies: if req_to_install.extras: @@ -419,21 +552,27 @@ class Resolver(BaseResolver): ",".join(req_to_install.extras), ) missing_requested = sorted( - set(req_to_install.extras) - set(dist.extras) + set(req_to_install.extras) - set(dist.iter_provided_extras()) ) for missing in missing_requested: - logger.warning("%s does not provide the extra '%s'", dist, missing) + logger.warning( + "%s %s does not provide the extra '%s'", + dist.raw_name, + dist.version, + missing, + ) available_requested = sorted( - set(dist.extras) & set(req_to_install.extras) + set(dist.iter_provided_extras()) & set(req_to_install.extras) ) - for subreq in dist.requires(available_requested): + for subreq in dist.iter_dependencies(available_requested): add_req(subreq, extras_requested=available_requested) return more_reqs - def get_installation_order(self, req_set): - # type: (RequirementSet) -> List[InstallRequirement] + def get_installation_order( + self, req_set: RequirementSet + ) -> List[InstallRequirement]: """Create the installation order. The installation order is topological - requirements are installed @@ -444,10 +583,9 @@ class Resolver(BaseResolver): # installs the user specified things in the order given, except when # dependencies must come earlier to achieve topological order. 
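
Illustrative aside, not part of the patch: a condensed sketch of the depth-first post-order walk that the schedule() function below performs, with made-up data; the real scheduler also skips already-satisfied and constraint-only requirements.

def installation_order(user_order, deps):
    order, scheduled = [], set()

    def schedule(name):
        if name in scheduled:
            return
        scheduled.add(name)
        for dep in deps.get(name, []):  # dependencies first (post-order)...
            schedule(dep)
        order.append(name)              # ...then the package itself

    for name in user_order:  # keeps the user's ordering where possible
        schedule(name)
    return order

# installation_order(["flask"], {"flask": ["werkzeug", "jinja2"], "jinja2": ["markupsafe"]})
# returns ["werkzeug", "markupsafe", "jinja2", "flask"]
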
order = [] - ordered_reqs = set() # type: Set[InstallRequirement] + ordered_reqs: Set[InstallRequirement] = set() - def schedule(req): - # type: (InstallRequirement) -> None + def schedule(req: InstallRequirement) -> None: if req.satisfied_by or req in ordered_reqs: return if req.constraint: diff --git a/env/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 234cf5ef336c279d8578e747367caa788f389a39..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/base.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/base.cpython-39.pyc deleted file mode 100644 index 2195c90242273d4b1b46da84ea848ec6fda777fd..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/base.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/candidates.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/candidates.cpython-39.pyc deleted file mode 100644 index 48b07213ca5ccb95f524a0b1d971c0c3ec33c051..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/candidates.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/factory.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/factory.cpython-39.pyc deleted file mode 100644 index d0361bc53a2b1a578c6e225fc0e00a75e89f34dd..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/factory.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/found_candidates.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/found_candidates.cpython-39.pyc deleted file mode 100644 index b968b76f13c1684f7cd6acab67d83b19b01b61b8..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/found_candidates.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/provider.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/provider.cpython-39.pyc deleted file mode 100644 index f8278ed7886972a330510fe19f0b8cce6b23f479..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/provider.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/reporter.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/reporter.cpython-39.pyc deleted file mode 100644 index 882b0e3cb0a8b38c113681c0fae98ce9ee16b304..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/reporter.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/requirements.cpython-39.pyc 
b/env/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/requirements.cpython-39.pyc deleted file mode 100644 index 92f11c46100fe5551b474e91a74c25ef37be1b94..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/requirements.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/resolver.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/resolver.cpython-39.pyc deleted file mode 100644 index 86f04fab60a8c8e2d9b97ea9c22243cf2ba26aad..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/resolution/resolvelib/__pycache__/resolver.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/resolution/resolvelib/base.py b/env/Lib/site-packages/pip/_internal/resolution/resolvelib/base.py index 26821a1facd7ab3b478dea6425aa0db0620e805a..b206692a0a976d8336e3f5896eadf4765a33fb2c 100644 --- a/env/Lib/site-packages/pip/_internal/resolution/resolvelib/base.py +++ b/env/Lib/site-packages/pip/_internal/resolution/resolvelib/base.py @@ -12,8 +12,7 @@ CandidateLookup = Tuple[Optional["Candidate"], Optional[InstallRequirement]] CandidateVersion = Union[LegacyVersion, Version] -def format_name(project, extras): - # type: (str, FrozenSet[str]) -> str +def format_name(project: str, extras: FrozenSet[str]) -> str: if not extras: return project canonical_extras = sorted(canonicalize_name(e) for e in extras) @@ -21,33 +20,26 @@ def format_name(project, extras): class Constraint: - def __init__(self, specifier, hashes, links): - # type: (SpecifierSet, Hashes, FrozenSet[Link]) -> None + def __init__( + self, specifier: SpecifierSet, hashes: Hashes, links: FrozenSet[Link] + ) -> None: self.specifier = specifier self.hashes = hashes self.links = links @classmethod - def empty(cls): - # type: () -> Constraint + def empty(cls) -> "Constraint": return Constraint(SpecifierSet(), Hashes(), frozenset()) @classmethod - def from_ireq(cls, ireq): - # type: (InstallRequirement) -> Constraint + def from_ireq(cls, ireq: InstallRequirement) -> "Constraint": links = frozenset([ireq.link]) if ireq.link else frozenset() return Constraint(ireq.specifier, ireq.hashes(trust_internet=False), links) - def __nonzero__(self): - # type: () -> bool + def __bool__(self) -> bool: return bool(self.specifier) or bool(self.hashes) or bool(self.links) - def __bool__(self): - # type: () -> bool - return self.__nonzero__() - - def __and__(self, other): - # type: (InstallRequirement) -> Constraint + def __and__(self, other: InstallRequirement) -> "Constraint": if not isinstance(other, InstallRequirement): return NotImplemented specifier = self.specifier & other.specifier @@ -57,8 +49,7 @@ class Constraint: links = links.union([other.link]) return Constraint(specifier, hashes, links) - def is_satisfied_by(self, candidate): - # type: (Candidate) -> bool + def is_satisfied_by(self, candidate: "Candidate") -> bool: # Reject if there are any mismatched URL constraints on this package. if self.links and not all(_match_link(link, candidate) for link in self.links): return False @@ -70,8 +61,7 @@ class Constraint: class Requirement: @property - def project_name(self): - # type: () -> NormalizedName + def project_name(self) -> NormalizedName: """The "project name" of a requirement. 
This is different from ``name`` if this requirement contains extras, @@ -81,8 +71,7 @@ class Requirement: raise NotImplementedError("Subclass should override") @property - def name(self): - # type: () -> str + def name(self) -> str: """The name identifying this requirement in the resolver. This is different from ``project_name`` if this requirement contains @@ -90,21 +79,17 @@ class Requirement: """ raise NotImplementedError("Subclass should override") - def is_satisfied_by(self, candidate): - # type: (Candidate) -> bool + def is_satisfied_by(self, candidate: "Candidate") -> bool: return False - def get_candidate_lookup(self): - # type: () -> CandidateLookup + def get_candidate_lookup(self) -> CandidateLookup: raise NotImplementedError("Subclass should override") - def format_for_error(self): - # type: () -> str + def format_for_error(self) -> str: raise NotImplementedError("Subclass should override") -def _match_link(link, candidate): - # type: (Link, Candidate) -> bool +def _match_link(link: Link, candidate: "Candidate") -> bool: if candidate.source_link: return links_equivalent(link, candidate.source_link) return False @@ -112,8 +97,7 @@ def _match_link(link, candidate): class Candidate: @property - def project_name(self): - # type: () -> NormalizedName + def project_name(self) -> NormalizedName: """The "project name" of the candidate. This is different from ``name`` if this candidate contains extras, @@ -123,8 +107,7 @@ class Candidate: raise NotImplementedError("Override in subclass") @property - def name(self): - # type: () -> str + def name(self) -> str: """The name identifying this candidate in the resolver. This is different from ``project_name`` if this candidate contains @@ -133,33 +116,26 @@ class Candidate: raise NotImplementedError("Override in subclass") @property - def version(self): - # type: () -> CandidateVersion + def version(self) -> CandidateVersion: raise NotImplementedError("Override in subclass") @property - def is_installed(self): - # type: () -> bool + def is_installed(self) -> bool: raise NotImplementedError("Override in subclass") @property - def is_editable(self): - # type: () -> bool + def is_editable(self) -> bool: raise NotImplementedError("Override in subclass") @property - def source_link(self): - # type: () -> Optional[Link] + def source_link(self) -> Optional[Link]: raise NotImplementedError("Override in subclass") - def iter_dependencies(self, with_requires): - # type: (bool) -> Iterable[Optional[Requirement]] + def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]: raise NotImplementedError("Override in subclass") - def get_install_requirement(self): - # type: () -> Optional[InstallRequirement] + def get_install_requirement(self) -> Optional[InstallRequirement]: raise NotImplementedError("Override in subclass") - def format_for_error(self): - # type: () -> str + def format_for_error(self) -> str: raise NotImplementedError("Subclass should override") diff --git a/env/Lib/site-packages/pip/_internal/resolution/resolvelib/candidates.py b/env/Lib/site-packages/pip/_internal/resolution/resolvelib/candidates.py index da516ad3c87f2f0363e391500e7f75cf65080543..de04e1d73f2d86fb3ac094c82b9109ab71a0f917 100644 --- a/env/Lib/site-packages/pip/_internal/resolution/resolvelib/candidates.py +++ b/env/Lib/site-packages/pip/_internal/resolution/resolvelib/candidates.py @@ -2,13 +2,15 @@ import logging import sys from typing import TYPE_CHECKING, Any, FrozenSet, Iterable, Optional, Tuple, Union, cast -from pip._vendor.packaging.specifiers 
import InvalidSpecifier, SpecifierSet from pip._vendor.packaging.utils import NormalizedName, canonicalize_name from pip._vendor.packaging.version import Version -from pip._vendor.packaging.version import parse as parse_version -from pip._vendor.pkg_resources import Distribution -from pip._internal.exceptions import HashError, MetadataInconsistent +from pip._internal.exceptions import ( + HashError, + InstallationSubprocessError, + MetadataInconsistent, +) +from pip._internal.metadata import BaseDistribution from pip._internal.models.link import Link, links_equivalent from pip._internal.models.wheel import Wheel from pip._internal.req.constructors import ( @@ -16,8 +18,8 @@ from pip._internal.req.constructors import ( install_req_from_line, ) from pip._internal.req.req_install import InstallRequirement -from pip._internal.utils.misc import dist_is_editable, normalize_version_info -from pip._internal.utils.packaging import get_requires_python +from pip._internal.utils.direct_url_helpers import direct_url_from_link +from pip._internal.utils.misc import normalize_version_info from .base import Candidate, CandidateVersion, Requirement, format_name @@ -32,6 +34,9 @@ BaseCandidate = Union[ "LinkCandidate", ] +# Avoid conflicting with the PyPI package "Python". +REQUIRES_PYTHON_IDENTIFIER = cast(NormalizedName, "<Python from Requires-Python>") + def as_base_candidate(candidate: Candidate) -> Optional[BaseCandidate]: """The runtime version of BaseCandidate.""" @@ -45,8 +50,9 @@ def as_base_candidate(candidate: Candidate) -> Optional[BaseCandidate]: return None -def make_install_req_from_link(link, template): - # type: (Link, InstallRequirement) -> InstallRequirement +def make_install_req_from_link( + link: Link, template: InstallRequirement +) -> InstallRequirement: assert not template.editable, "template is editable" if template.req: line = str(template.req) @@ -59,44 +65,45 @@ def make_install_req_from_link(link, template): use_pep517=template.use_pep517, isolated=template.isolated, constraint=template.constraint, - options=dict( - install_options=template.install_options, - global_options=template.global_options, - hashes=template.hash_options, - ), + global_options=template.global_options, + hash_options=template.hash_options, + config_settings=template.config_settings, ) ireq.original_link = template.original_link ireq.link = link + ireq.extras = template.extras return ireq -def make_install_req_from_editable(link, template): - # type: (Link, InstallRequirement) -> InstallRequirement +def make_install_req_from_editable( + link: Link, template: InstallRequirement +) -> InstallRequirement: assert template.editable, "template not editable" - return install_req_from_editable( + ireq = install_req_from_editable( link.url, user_supplied=template.user_supplied, comes_from=template.comes_from, use_pep517=template.use_pep517, isolated=template.isolated, constraint=template.constraint, - options=dict( - install_options=template.install_options, - global_options=template.global_options, - hashes=template.hash_options, - ), + permit_editable_wheels=template.permit_editable_wheels, + global_options=template.global_options, + hash_options=template.hash_options, + config_settings=template.config_settings, ) + ireq.extras = template.extras + return ireq -def make_install_req_from_dist(dist, template): - # type: (Distribution, InstallRequirement) -> InstallRequirement - project_name = canonicalize_name(dist.project_name) +def _make_install_req_from_dist( + dist: BaseDistribution, template: InstallRequirement 
+) -> InstallRequirement: if template.req: line = str(template.req) elif template.link: - line = f"{project_name} @ {template.link.url}" + line = f"{dist.canonical_name} @ {template.link.url}" else: - line = f"{project_name}=={dist.parsed_version}" + line = f"{dist.canonical_name}=={dist.version}" ireq = install_req_from_line( line, user_supplied=template.user_supplied, @@ -104,11 +111,9 @@ def make_install_req_from_dist(dist, template): use_pep517=template.use_pep517, isolated=template.isolated, constraint=template.constraint, - options=dict( - install_options=template.install_options, - global_options=template.global_options, - hashes=template.hash_options, - ), + global_options=template.global_options, + hash_options=template.hash_options, + config_settings=template.config_settings, ) ireq.satisfied_by = dist return ireq @@ -130,18 +135,18 @@ class _InstallRequirementBackedCandidate(Candidate): found remote link (e.g. from pypi.org). """ + dist: BaseDistribution is_installed = False def __init__( self, - link, # type: Link - source_link, # type: Link - ireq, # type: InstallRequirement - factory, # type: Factory - name=None, # type: Optional[NormalizedName] - version=None, # type: Optional[CandidateVersion] - ): - # type: (...) -> None + link: Link, + source_link: Link, + ireq: InstallRequirement, + factory: "Factory", + name: Optional[NormalizedName] = None, + version: Optional[CandidateVersion] = None, + ) -> None: self._link = link self._source_link = source_link self._factory = factory @@ -150,86 +155,72 @@ class _InstallRequirementBackedCandidate(Candidate): self._version = version self.dist = self._prepare() - def __str__(self): - # type: () -> str + def __str__(self) -> str: return f"{self.name} {self.version}" - def __repr__(self): - # type: () -> str + def __repr__(self) -> str: return "{class_name}({link!r})".format( class_name=self.__class__.__name__, link=str(self._link), ) - def __hash__(self): - # type: () -> int + def __hash__(self) -> int: return hash((self.__class__, self._link)) - def __eq__(self, other): - # type: (Any) -> bool + def __eq__(self, other: Any) -> bool: if isinstance(other, self.__class__): return links_equivalent(self._link, other._link) return False @property - def source_link(self): - # type: () -> Optional[Link] + def source_link(self) -> Optional[Link]: return self._source_link @property - def project_name(self): - # type: () -> NormalizedName + def project_name(self) -> NormalizedName: """The normalised name of the project the candidate refers to""" if self._name is None: - self._name = canonicalize_name(self.dist.project_name) + self._name = self.dist.canonical_name return self._name @property - def name(self): - # type: () -> str + def name(self) -> str: return self.project_name @property - def version(self): - # type: () -> CandidateVersion + def version(self) -> CandidateVersion: if self._version is None: - self._version = parse_version(self.dist.version) + self._version = self.dist.version return self._version - def format_for_error(self): - # type: () -> str + def format_for_error(self) -> str: return "{} {} (from {})".format( self.name, self.version, self._link.file_path if self._link.is_file else self._link, ) - def _prepare_distribution(self): - # type: () -> Distribution + def _prepare_distribution(self) -> BaseDistribution: raise NotImplementedError("Override in subclass") - def _check_metadata_consistency(self, dist): - # type: (Distribution) -> None + def _check_metadata_consistency(self, dist: BaseDistribution) -> None: """Check for 
consistency of project name and version of dist.""" - canonical_name = canonicalize_name(dist.project_name) - if self._name is not None and self._name != canonical_name: + if self._name is not None and self._name != dist.canonical_name: raise MetadataInconsistent( self._ireq, "name", self._name, - dist.project_name, + dist.canonical_name, ) - parsed_version = parse_version(dist.version) - if self._version is not None and self._version != parsed_version: + if self._version is not None and self._version != dist.version: raise MetadataInconsistent( self._ireq, "version", str(self._version), - dist.version, + str(dist.version), ) - def _prepare(self): - # type: () -> Distribution + def _prepare(self) -> BaseDistribution: try: dist = self._prepare_distribution() except HashError as e: @@ -238,31 +229,21 @@ class _InstallRequirementBackedCandidate(Candidate): # offending line to the user. e.req = self._ireq raise + except InstallationSubprocessError as exc: + # The output has been presented already, so don't duplicate it. + exc.context = "See above for output." + raise + self._check_metadata_consistency(dist) return dist - def _get_requires_python_dependency(self): - # type: () -> Optional[Requirement] - requires_python = get_requires_python(self.dist) - if requires_python is None: - return None - try: - spec = SpecifierSet(requires_python) - except InvalidSpecifier as e: - message = "Package %r has an invalid Requires-Python: %s" - logger.warning(message, self.name, e) - return None - return self._factory.make_requires_python_requirement(spec) - - def iter_dependencies(self, with_requires): - # type: (bool) -> Iterable[Optional[Requirement]] - requires = self.dist.requires() if with_requires else () + def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]: + requires = self.dist.iter_dependencies() if with_requires else () for r in requires: yield self._factory.make_requirement_from_spec(str(r), self._ireq) - yield self._get_requires_python_dependency() + yield self._factory.make_requires_python_requirement(self.dist.requires_python) - def get_install_requirement(self): - # type: () -> Optional[InstallRequirement] + def get_install_requirement(self) -> Optional[InstallRequirement]: return self._ireq @@ -271,15 +252,14 @@ class LinkCandidate(_InstallRequirementBackedCandidate): def __init__( self, - link, # type: Link - template, # type: InstallRequirement - factory, # type: Factory - name=None, # type: Optional[NormalizedName] - version=None, # type: Optional[CandidateVersion] - ): - # type: (...) 
-> None + link: Link, + template: InstallRequirement, + factory: "Factory", + name: Optional[NormalizedName] = None, + version: Optional[CandidateVersion] = None, + ) -> None: source_link = link - cache_entry = factory.get_wheel_cache_entry(link, name) + cache_entry = factory.get_wheel_cache_entry(source_link, name) if cache_entry is not None: logger.debug("Using cached wheel link: %s", cache_entry.link) link = cache_entry.link @@ -296,12 +276,19 @@ class LinkCandidate(_InstallRequirementBackedCandidate): version, wheel_version, name ) - if ( - cache_entry is not None - and cache_entry.persistent - and template.link is template.original_link - ): - ireq.original_link_is_in_wheel_cache = True + if cache_entry is not None: + assert ireq.link.is_wheel + assert ireq.link.is_file + if cache_entry.persistent and template.link is template.original_link: + ireq.cached_wheel_source_link = source_link + if cache_entry.origin is not None: + ireq.download_info = cache_entry.origin + else: + # Legacy cache entry that does not have origin.json. + # download_info may miss the archive_info.hashes field. + ireq.download_info = direct_url_from_link( + source_link, link_is_in_wheel_cache=cache_entry.persistent + ) super().__init__( link=link, @@ -312,11 +299,9 @@ class LinkCandidate(_InstallRequirementBackedCandidate): version=version, ) - def _prepare_distribution(self): - # type: () -> Distribution - return self._factory.preparer.prepare_linked_requirement( - self._ireq, parallel_builds=True - ) + def _prepare_distribution(self) -> BaseDistribution: + preparer = self._factory.preparer + return preparer.prepare_linked_requirement(self._ireq, parallel_builds=True) class EditableCandidate(_InstallRequirementBackedCandidate): @@ -324,13 +309,12 @@ class EditableCandidate(_InstallRequirementBackedCandidate): def __init__( self, - link, # type: Link - template, # type: InstallRequirement - factory, # type: Factory - name=None, # type: Optional[NormalizedName] - version=None, # type: Optional[CandidateVersion] - ): - # type: (...) -> None + link: Link, + template: InstallRequirement, + factory: "Factory", + name: Optional[NormalizedName] = None, + version: Optional[CandidateVersion] = None, + ) -> None: super().__init__( link=link, source_link=link, @@ -340,8 +324,7 @@ class EditableCandidate(_InstallRequirementBackedCandidate): version=version, ) - def _prepare_distribution(self): - # type: () -> Distribution + def _prepare_distribution(self) -> BaseDistribution: return self._factory.preparer.prepare_editable_requirement(self._ireq) @@ -351,76 +334,67 @@ class AlreadyInstalledCandidate(Candidate): def __init__( self, - dist, # type: Distribution - template, # type: InstallRequirement - factory, # type: Factory - ): - # type: (...) -> None + dist: BaseDistribution, + template: InstallRequirement, + factory: "Factory", + ) -> None: self.dist = dist - self._ireq = make_install_req_from_dist(dist, template) + self._ireq = _make_install_req_from_dist(dist, template) self._factory = factory + self._version = None # This is just logging some messages, so we can do it eagerly. # The returned dist would be exactly the same as self.dist because we - # set satisfied_by in make_install_req_from_dist. + # set satisfied_by in _make_install_req_from_dist. # TODO: Supply reason based on force_reinstall and upgrade_strategy. 
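
Illustrative aside, not part of the patch: the __hash__/__eq__ pair defined a few lines below keys an installed candidate on (class, name, version); a stripped-down sketch of that identity contract, with a hypothetical class name.

class InstalledCandidateSketch:
    def __init__(self, name, version):
        self.name, self.version = name, version

    def __hash__(self):
        return hash((self.__class__, self.name, self.version))

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.name == other.name and self.version == other.version
        return False

# Two wrappers around the same installed distribution collapse to one entry:
assert len({InstalledCandidateSketch("pip", "21.1.3"),
            InstalledCandidateSketch("pip", "21.1.3")}) == 1
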
skip_reason = "already satisfied" factory.preparer.prepare_installed_requirement(self._ireq, skip_reason) - def __str__(self): - # type: () -> str + def __str__(self) -> str: return str(self.dist) - def __repr__(self): - # type: () -> str + def __repr__(self) -> str: return "{class_name}({distribution!r})".format( class_name=self.__class__.__name__, distribution=self.dist, ) - def __hash__(self): - # type: () -> int + def __hash__(self) -> int: return hash((self.__class__, self.name, self.version)) - def __eq__(self, other): - # type: (Any) -> bool + def __eq__(self, other: Any) -> bool: if isinstance(other, self.__class__): return self.name == other.name and self.version == other.version return False @property - def project_name(self): - # type: () -> NormalizedName - return canonicalize_name(self.dist.project_name) + def project_name(self) -> NormalizedName: + return self.dist.canonical_name @property - def name(self): - # type: () -> str + def name(self) -> str: return self.project_name @property - def version(self): - # type: () -> CandidateVersion - return parse_version(self.dist.version) + def version(self) -> CandidateVersion: + if self._version is None: + self._version = self.dist.version + return self._version @property - def is_editable(self): - # type: () -> bool - return dist_is_editable(self.dist) + def is_editable(self) -> bool: + return self.dist.editable - def format_for_error(self): - # type: () -> str + def format_for_error(self) -> str: return f"{self.name} {self.version} (Installed)" - def iter_dependencies(self, with_requires): - # type: (bool) -> Iterable[Optional[Requirement]] + def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]: if not with_requires: return - for r in self.dist.requires(): + for r in self.dist.iter_dependencies(): yield self._factory.make_requirement_from_spec(str(r), self._ireq) - def get_install_requirement(self): - # type: () -> Optional[InstallRequirement] + def get_install_requirement(self) -> Optional[InstallRequirement]: return None @@ -451,75 +425,62 @@ class ExtrasCandidate(Candidate): def __init__( self, - base, # type: BaseCandidate - extras, # type: FrozenSet[str] - ): - # type: (...) 
-> None + base: BaseCandidate, + extras: FrozenSet[str], + ) -> None: self.base = base self.extras = extras - def __str__(self): - # type: () -> str + def __str__(self) -> str: name, rest = str(self.base).split(" ", 1) return "{}[{}] {}".format(name, ",".join(self.extras), rest) - def __repr__(self): - # type: () -> str + def __repr__(self) -> str: return "{class_name}(base={base!r}, extras={extras!r})".format( class_name=self.__class__.__name__, base=self.base, extras=self.extras, ) - def __hash__(self): - # type: () -> int + def __hash__(self) -> int: return hash((self.base, self.extras)) - def __eq__(self, other): - # type: (Any) -> bool + def __eq__(self, other: Any) -> bool: if isinstance(other, self.__class__): return self.base == other.base and self.extras == other.extras return False @property - def project_name(self): - # type: () -> NormalizedName + def project_name(self) -> NormalizedName: return self.base.project_name @property - def name(self): - # type: () -> str + def name(self) -> str: """The normalised name of the project the candidate refers to""" return format_name(self.base.project_name, self.extras) @property - def version(self): - # type: () -> CandidateVersion + def version(self) -> CandidateVersion: return self.base.version - def format_for_error(self): - # type: () -> str + def format_for_error(self) -> str: return "{} [{}]".format( self.base.format_for_error(), ", ".join(sorted(self.extras)) ) @property - def is_installed(self): - # type: () -> bool + def is_installed(self) -> bool: return self.base.is_installed @property - def is_editable(self): - # type: () -> bool + def is_editable(self) -> bool: return self.base.is_editable @property - def source_link(self): - # type: () -> Optional[Link] + def source_link(self) -> Optional[Link]: return self.base.source_link - def iter_dependencies(self, with_requires): - # type: (bool) -> Iterable[Optional[Requirement]] + def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]: factory = self.base._factory # Add a dependency on the exact base @@ -530,8 +491,8 @@ class ExtrasCandidate(Candidate): # The user may have specified extras that the candidate doesn't # support. We ignore any unsupported extras here. - valid_extras = self.extras.intersection(self.base.dist.extras) - invalid_extras = self.extras.difference(self.base.dist.extras) + valid_extras = self.extras.intersection(self.base.dist.iter_provided_extras()) + invalid_extras = self.extras.difference(self.base.dist.iter_provided_extras()) for extra in sorted(invalid_extras): logger.warning( "%s %s does not provide the extra '%s'", @@ -540,15 +501,14 @@ class ExtrasCandidate(Candidate): extra, ) - for r in self.base.dist.requires(valid_extras): + for r in self.base.dist.iter_dependencies(valid_extras): requirement = factory.make_requirement_from_spec( str(r), self.base._ireq, valid_extras ) if requirement: yield requirement - def get_install_requirement(self): - # type: () -> Optional[InstallRequirement] + def get_install_requirement(self) -> Optional[InstallRequirement]: # We don't return anything here, because we always # depend on the base candidate, and we'll get the # install requirement from that. 
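
Illustrative aside, not part of the patch: the identifier that keeps an extras candidate distinct from its base comes from the format_name helper shown earlier in base.py; a self-contained paraphrase using the standalone packaging library.

from packaging.utils import canonicalize_name

def format_name(project, extras):
    if not extras:
        return project
    # Canonicalise and sort the extras so lookups are order-independent:
    # "requests[socks,security]" and "requests[security,socks]" coincide.
    canonical_extras = sorted(canonicalize_name(e) for e in extras)
    return "{}[{}]".format(project, ",".join(canonical_extras))

assert format_name("requests", frozenset({"socks", "security"})) == "requests[security,socks]"
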
@@ -559,8 +519,7 @@ class RequiresPythonCandidate(Candidate): is_installed = False source_link = None - def __init__(self, py_version_info): - # type: (Optional[Tuple[int, ...]]) -> None + def __init__(self, py_version_info: Optional[Tuple[int, ...]]) -> None: if py_version_info is not None: version_info = normalize_version_info(py_version_info) else: @@ -571,34 +530,26 @@ class RequiresPythonCandidate(Candidate): # only one RequiresPythonCandidate in a resolution, i.e. the host Python. # The built-in object.__eq__() and object.__ne__() do exactly what we want. - def __str__(self): - # type: () -> str + def __str__(self) -> str: return f"Python {self._version}" @property - def project_name(self): - # type: () -> NormalizedName - # Avoid conflicting with the PyPI package "Python". - return cast(NormalizedName, "<Python from Requires-Python>") + def project_name(self) -> NormalizedName: + return REQUIRES_PYTHON_IDENTIFIER @property - def name(self): - # type: () -> str - return self.project_name + def name(self) -> str: + return REQUIRES_PYTHON_IDENTIFIER @property - def version(self): - # type: () -> CandidateVersion + def version(self) -> CandidateVersion: return self._version - def format_for_error(self): - # type: () -> str + def format_for_error(self) -> str: return f"Python {self.version}" - def iter_dependencies(self, with_requires): - # type: (bool) -> Iterable[Optional[Requirement]] + def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]: return () - def get_install_requirement(self): - # type: () -> Optional[InstallRequirement] + def get_install_requirement(self) -> Optional[InstallRequirement]: return None diff --git a/env/Lib/site-packages/pip/_internal/resolution/resolvelib/factory.py b/env/Lib/site-packages/pip/_internal/resolution/resolvelib/factory.py index 5816a0ede382d479b30069042a256a27d6481a01..0331297b85b89c3387c3868d6254f420ed6a0381 100644 --- a/env/Lib/site-packages/pip/_internal/resolution/resolvelib/factory.py +++ b/env/Lib/site-packages/pip/_internal/resolution/resolvelib/factory.py @@ -9,6 +9,7 @@ from typing import ( Iterator, List, Mapping, + NamedTuple, Optional, Sequence, Set, @@ -18,35 +19,32 @@ from typing import ( ) from pip._vendor.packaging.requirements import InvalidRequirement -from pip._vendor.packaging.requirements import Requirement as PackagingRequirement from pip._vendor.packaging.specifiers import SpecifierSet from pip._vendor.packaging.utils import NormalizedName, canonicalize_name -from pip._vendor.pkg_resources import Distribution from pip._vendor.resolvelib import ResolutionImpossible from pip._internal.cache import CacheEntry, WheelCache from pip._internal.exceptions import ( DistributionNotFound, InstallationError, - InstallationSubprocessError, MetadataInconsistent, UnsupportedPythonVersion, UnsupportedWheel, ) from pip._internal.index.package_finder import PackageFinder +from pip._internal.metadata import BaseDistribution, get_default_environment from pip._internal.models.link import Link from pip._internal.models.wheel import Wheel from pip._internal.operations.prepare import RequirementPreparer from pip._internal.req.constructors import install_req_from_link_and_ireq -from pip._internal.req.req_install import InstallRequirement +from pip._internal.req.req_install import ( + InstallRequirement, + check_invalid_constraint_type, +) from pip._internal.resolution.base import InstallRequirementProvider from pip._internal.utils.compatibility_tags import get_supported from pip._internal.utils.hashes import Hashes 
-from pip._internal.utils.misc import ( - dist_in_site_packages, - dist_in_usersite, - get_installed_distributions, -) +from pip._internal.utils.packaging import get_requirement from pip._internal.utils.virtualenv import running_under_virtualenv from .base import Candidate, CandidateVersion, Constraint, Requirement @@ -81,20 +79,25 @@ C = TypeVar("C") Cache = Dict[Link, C] +class CollectedRootRequirements(NamedTuple): + requirements: List[Requirement] + constraints: Dict[str, Constraint] + user_requested: Dict[str, int] + + class Factory: def __init__( self, - finder, # type: PackageFinder - preparer, # type: RequirementPreparer - make_install_req, # type: InstallRequirementProvider - wheel_cache, # type: Optional[WheelCache] - use_user_site, # type: bool - force_reinstall, # type: bool - ignore_installed, # type: bool - ignore_requires_python, # type: bool - py_version_info=None, # type: Optional[Tuple[int, ...]] - ): - # type: (...) -> None + finder: PackageFinder, + preparer: RequirementPreparer, + make_install_req: InstallRequirementProvider, + wheel_cache: Optional[WheelCache], + use_user_site: bool, + force_reinstall: bool, + ignore_installed: bool, + ignore_requires_python: bool, + py_version_info: Optional[Tuple[int, ...]] = None, + ) -> None: self._finder = finder self.preparer = preparer self._wheel_cache = wheel_cache @@ -104,27 +107,25 @@ class Factory: self._force_reinstall = force_reinstall self._ignore_requires_python = ignore_requires_python - self._build_failures = {} # type: Cache[InstallationError] - self._link_candidate_cache = {} # type: Cache[LinkCandidate] - self._editable_candidate_cache = {} # type: Cache[EditableCandidate] - self._installed_candidate_cache = ( - {} - ) # type: Dict[str, AlreadyInstalledCandidate] - self._extras_candidate_cache = ( - {} - ) # type: Dict[Tuple[int, FrozenSet[str]], ExtrasCandidate] + self._build_failures: Cache[InstallationError] = {} + self._link_candidate_cache: Cache[LinkCandidate] = {} + self._editable_candidate_cache: Cache[EditableCandidate] = {} + self._installed_candidate_cache: Dict[str, AlreadyInstalledCandidate] = {} + self._extras_candidate_cache: Dict[ + Tuple[int, FrozenSet[str]], ExtrasCandidate + ] = {} if not ignore_installed: + env = get_default_environment() self._installed_dists = { - canonicalize_name(dist.project_name): dist - for dist in get_installed_distributions(local_only=False) + dist.canonical_name: dist + for dist in env.iter_installed_distributions(local_only=False) } else: self._installed_dists = {} @property - def force_reinstall(self): - # type: () -> bool + def force_reinstall(self) -> bool: return self._force_reinstall def _fail_if_link_is_unsupported_wheel(self, link: Link) -> None: @@ -136,8 +137,9 @@ class Factory: msg = f"{link.filename} is not a supported wheel on this platform." raise UnsupportedWheel(msg) - def _make_extras_candidate(self, base, extras): - # type: (BaseCandidate, FrozenSet[str]) -> ExtrasCandidate + def _make_extras_candidate( + self, base: BaseCandidate, extras: FrozenSet[str] + ) -> ExtrasCandidate: cache_key = (id(base), extras) try: candidate = self._extras_candidate_cache[cache_key] @@ -148,29 +150,27 @@ class Factory: def _make_candidate_from_dist( self, - dist, # type: Distribution - extras, # type: FrozenSet[str] - template, # type: InstallRequirement - ): - # type: (...) 
-> Candidate + dist: BaseDistribution, + extras: FrozenSet[str], + template: InstallRequirement, + ) -> Candidate: try: - base = self._installed_candidate_cache[dist.key] + base = self._installed_candidate_cache[dist.canonical_name] except KeyError: base = AlreadyInstalledCandidate(dist, template, factory=self) - self._installed_candidate_cache[dist.key] = base + self._installed_candidate_cache[dist.canonical_name] = base if not extras: return base return self._make_extras_candidate(base, extras) def _make_candidate_from_link( self, - link, # type: Link - extras, # type: FrozenSet[str] - template, # type: InstallRequirement - name, # type: Optional[NormalizedName] - version, # type: Optional[CandidateVersion] - ): - # type: (...) -> Optional[Candidate] + link: Link, + extras: FrozenSet[str], + template: InstallRequirement, + name: Optional[NormalizedName], + version: Optional[CandidateVersion], + ) -> Optional[Candidate]: # TODO: Check already installed candidate, and use it if the link and # editable flag match. @@ -189,11 +189,17 @@ class Factory: name=name, version=version, ) - except (InstallationSubprocessError, MetadataInconsistent) as e: - logger.warning("Discarding %s. %s", link, e) + except MetadataInconsistent as e: + logger.info( + "Discarding [blue underline]%s[/]: [yellow]%s[reset]", + link, + e, + extra={"markup": True}, + ) self._build_failures[link] = e return None - base = self._editable_candidate_cache[link] # type: BaseCandidate + + base: BaseCandidate = self._editable_candidate_cache[link] else: if link not in self._link_candidate_cache: try: @@ -204,8 +210,13 @@ class Factory: name=name, version=version, ) - except (InstallationSubprocessError, MetadataInconsistent) as e: - logger.warning("Discarding %s. %s", link, e) + except MetadataInconsistent as e: + logger.info( + "Discarding [blue underline]%s[/]: [yellow]%s[reset]", + link, + e, + extra={"markup": True}, + ) self._build_failures[link] = e return None base = self._link_candidate_cache[link] @@ -233,7 +244,7 @@ class Factory: assert template.req, "Candidates found on index must be PEP 508" name = canonicalize_name(template.req.name) - extras = frozenset() # type: FrozenSet[str] + extras: FrozenSet[str] = frozenset() for ireq in ireqs: assert ireq.req, "Candidates found on index must be PEP 508" specifier &= ireq.req.specifier @@ -259,13 +270,12 @@ class Factory: extras=extras, template=template, ) - # The candidate is a known incompatiblity. Don't use it. + # The candidate is a known incompatibility. Don't use it. if id(candidate) in incompatible_ids: return None return candidate - def iter_index_candidate_infos(): - # type: () -> Iterator[IndexCandidateInfo] + def iter_index_candidate_infos() -> Iterator[IndexCandidateInfo]: result = self._finder.find_best_candidate( project_name=name, specifier=specifier, @@ -273,14 +283,27 @@ class Factory: ) icans = list(result.iter_applicable()) - # PEP 592: Yanked releases must be ignored unless only yanked - # releases can satisfy the version range. So if this is false, - # all yanked icans need to be skipped. + # PEP 592: Yanked releases are ignored unless the specifier + # explicitly pins a version (via '==' or '===') that can be + # solely satisfied by a yanked release. 
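
Illustrative aside, not part of the patch: expressed as a single predicate, the filtering rule from the comment above and the loop below; names are illustrative.

def keep_candidate(is_yanked: bool, all_yanked: bool, pinned: bool) -> bool:
    # Non-yanked files always pass. A yanked file passes only when every
    # applicable file is yanked AND the specifier pins an exact version,
    # which is the PEP 592 carve-out.
    return (not is_yanked) or (all_yanked and pinned)
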
all_yanked = all(ican.link.is_yanked for ican in icans) + def is_pinned(specifier: SpecifierSet) -> bool: + for sp in specifier: + if sp.operator == "===": + return True + if sp.operator != "==": + continue + if sp.version.endswith(".*"): + continue + return True + return False + + pinned = is_pinned(specifier) + # PackageFinder returns earlier versions first, so we reverse. for ican in reversed(icans): - if not all_yanked and ican.link.is_yanked: + if not (all_yanked and pinned) and ican.link.is_yanked: continue func = functools.partial( self._make_candidate_from_link, @@ -347,14 +370,14 @@ class Factory: def find_candidates( self, identifier: str, - requirements: Mapping[str, Iterator[Requirement]], + requirements: Mapping[str, Iterable[Requirement]], incompatibilities: Mapping[str, Iterator[Candidate]], constraint: Constraint, prefers_installed: bool, ) -> Iterable[Candidate]: # Collect basic lookup information from the requirements. - explicit_candidates = set() # type: Set[Candidate] - ireqs = [] # type: List[InstallRequirement] + explicit_candidates: Set[Candidate] = set() + ireqs: List[InstallRequirement] = [] for req in requirements[identifier]: cand, ireq = req.get_candidate_lookup() if cand is not None: @@ -365,7 +388,7 @@ class Factory: # If the current identifier contains extras, add explicit candidates # from entries from extra-less identifier. with contextlib.suppress(InvalidRequirement): - parsed_requirement = PackagingRequirement(identifier) + parsed_requirement = get_requirement(identifier) explicit_candidates.update( self._iter_explicit_candidates_from_base( requirements.get(parsed_requirement.name, ()), @@ -374,7 +397,7 @@ class Factory: ) # Add explicit candidates from constraints. We only do this if there are - # kown ireqs, which represent requirements not already explicit. If + # known ireqs, which represent requirements not already explicit. If # there are no ireqs, we're constraining already-explicit requirements, # which is handled later when we return the explicit candidates. 
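
Illustrative aside, not part of the patch: merging two constraints on the same name, as Constraint.__and__ earlier in base.py does for the specifier part, is plain specifier intersection at the packaging level; a short sketch with the standalone packaging library.

from packaging.specifiers import SpecifierSet

merged = SpecifierSet(">=1.0") & SpecifierSet("<2.0")  # both constraints apply
assert merged.contains("1.5")
assert not merged.contains("2.1")
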
if ireqs: @@ -414,8 +437,9 @@ class Factory: and all(req.is_satisfied_by(c) for req in requirements[identifier]) ) - def make_requirement_from_install_req(self, ireq, requested_extras): - # type: (InstallRequirement, Iterable[str]) -> Optional[Requirement] + def _make_requirement_from_install_req( + self, ireq: InstallRequirement, requested_extras: Iterable[str] + ) -> Optional[Requirement]: if not ireq.match_markers(requested_extras): logger.info( "Ignoring %s: markers '%s' don't match your environment", @@ -445,28 +469,64 @@ class Factory: return UnsatisfiableRequirement(canonicalize_name(ireq.name)) return self.make_requirement_from_candidate(cand) - def make_requirement_from_candidate(self, candidate): - # type: (Candidate) -> ExplicitRequirement + def collect_root_requirements( + self, root_ireqs: List[InstallRequirement] + ) -> CollectedRootRequirements: + collected = CollectedRootRequirements([], {}, {}) + for i, ireq in enumerate(root_ireqs): + if ireq.constraint: + # Ensure we only accept valid constraints + problem = check_invalid_constraint_type(ireq) + if problem: + raise InstallationError(problem) + if not ireq.match_markers(): + continue + assert ireq.name, "Constraint must be named" + name = canonicalize_name(ireq.name) + if name in collected.constraints: + collected.constraints[name] &= ireq + else: + collected.constraints[name] = Constraint.from_ireq(ireq) + else: + req = self._make_requirement_from_install_req( + ireq, + requested_extras=(), + ) + if req is None: + continue + if ireq.user_supplied and req.name not in collected.user_requested: + collected.user_requested[req.name] = i + collected.requirements.append(req) + return collected + + def make_requirement_from_candidate( + self, candidate: Candidate + ) -> ExplicitRequirement: return ExplicitRequirement(candidate) def make_requirement_from_spec( self, - specifier, # type: str - comes_from, # type: InstallRequirement - requested_extras=(), # type: Iterable[str] - ): - # type: (...) -> Optional[Requirement] + specifier: str, + comes_from: Optional[InstallRequirement], + requested_extras: Iterable[str] = (), + ) -> Optional[Requirement]: ireq = self._make_install_req_from_spec(specifier, comes_from) - return self.make_requirement_from_install_req(ireq, requested_extras) + return self._make_requirement_from_install_req(ireq, requested_extras) - def make_requires_python_requirement(self, specifier): - # type: (Optional[SpecifierSet]) -> Optional[Requirement] - if self._ignore_requires_python or specifier is None: + def make_requires_python_requirement( + self, + specifier: SpecifierSet, + ) -> Optional[Requirement]: + if self._ignore_requires_python: + return None + # Don't bother creating a dependency for an empty Requires-Python. + if not str(specifier): return None return RequiresPythonRequirement(specifier, self._python_candidate) - def get_wheel_cache_entry(self, link, name): - # type: (Link, Optional[str]) -> Optional[CacheEntry] + def get_wheel_cache_entry( + self, link: Link, name: Optional[str] + ) -> Optional[CacheEntry]: """Look up the link in the wheel cache. If ``preparer.require_hashes`` is True, don't use the wheel cache, @@ -475,7 +535,7 @@ class Factory: hash mismatches. Furthermore, cached wheels at present have nondeterministic contents due to file modification times. 
""" - if self._wheel_cache is None or self.preparer.require_hashes: + if self._wheel_cache is None: return None return self._wheel_cache.get_cache_entry( link=link, @@ -483,8 +543,7 @@ class Factory: supported_tags=get_supported(), ) - def get_dist_to_uninstall(self, candidate): - # type: (Candidate) -> Optional[Distribution] + def get_dist_to_uninstall(self, candidate: Candidate) -> Optional[BaseDistribution]: # TODO: Are there more cases this needs to return True? Editable? dist = self._installed_dists.get(candidate.project_name) if dist is None: # Not installed, no uninstallation required. @@ -497,25 +556,24 @@ class Factory: return dist # We're installing into user site. Remove the user site installation. - if dist_in_usersite(dist): + if dist.in_usersite: return dist # We're installing into user site, but the installed incompatible # package is in global site. We can't uninstall that, and would let # the new user installation to "shadow" it. But shadowing won't work # in virtual environments, so we error out. - if running_under_virtualenv() and dist_in_site_packages(dist): - raise InstallationError( - "Will not install to the user site because it will " - "lack sys.path precedence to {} in {}".format( - dist.project_name, - dist.location, - ) + if running_under_virtualenv() and dist.in_site_packages: + message = ( + f"Will not install to the user site because it will lack " + f"sys.path precedence to {dist.raw_name} in {dist.location}" ) + raise InstallationError(message) return None - def _report_requires_python_error(self, causes): - # type: (Sequence[ConflictCause]) -> UnsupportedPythonVersion + def _report_requires_python_error( + self, causes: Sequence["ConflictCause"] + ) -> UnsupportedPythonVersion: assert causes, "Requires-Python error reported with no cause" version = self._python_candidate.version @@ -535,32 +593,45 @@ class Factory: message += f"\n{specifier!r} (required by {package})" return UnsupportedPythonVersion(message) - def _report_single_requirement_conflict(self, req, parent): - # type: (Requirement, Optional[Candidate]) -> DistributionNotFound + def _report_single_requirement_conflict( + self, req: Requirement, parent: Optional[Candidate] + ) -> DistributionNotFound: if parent is None: req_disp = str(req) else: req_disp = f"{req} (from {parent.name})" cands = self._finder.find_all_candidates(req.project_name) + skipped_by_requires_python = self._finder.requires_python_skipped_reasons() versions = [str(v) for v in sorted({c.version for c in cands})] + if skipped_by_requires_python: + logger.critical( + "Ignored the following versions that require a different python " + "version: %s", + "; ".join(skipped_by_requires_python) or "none", + ) logger.critical( "Could not find a version that satisfies the requirement %s " "(from versions: %s)", req_disp, ", ".join(versions) or "none", ) + if str(req) == "requirements.txt": + logger.info( + "HINT: You are attempting to install a package literally " + 'named "requirements.txt" (which cannot exist). Consider ' + "using the '-r' flag to install the packages listed in " + "requirements.txt" + ) return DistributionNotFound(f"No matching distribution found for {req}") def get_installation_error( self, - e, # type: ResolutionImpossible[Requirement, Candidate] - constraints, # type: Dict[str, Constraint] - ): - # type: (...) 
-> InstallationError - + e: "ResolutionImpossible[Requirement, Candidate]", + constraints: Dict[str, Constraint], + ) -> InstallationError: assert e.causes, "Installation error reported with no cause" # If one of the things we can't solve is "we need Python X.Y", @@ -573,7 +644,7 @@ class Factory: ] if requires_python_causes: # The comprehension above makes sure all Requirement instances are - # RequiresPythonRequirement, so let's cast for convinience. + # RequiresPythonRequirement, so let's cast for convenience. return self._report_requires_python_error( cast("Sequence[ConflictCause]", requires_python_causes), ) @@ -592,15 +663,13 @@ class Factory: # satisfied at once. # A couple of formatting helpers - def text_join(parts): - # type: (List[str]) -> str + def text_join(parts: List[str]) -> str: if len(parts) == 1: return parts[0] return ", ".join(parts[:-1]) + " and " + parts[-1] - def describe_trigger(parent): - # type: (Candidate) -> str + def describe_trigger(parent: Candidate) -> str: ireq = parent.get_install_requirement() if not ireq or not ireq.comes_from: return f"{parent.name}=={parent.version}" @@ -656,6 +725,6 @@ class Factory: return DistributionNotFound( "ResolutionImpossible: for help visit " - "https://pip.pypa.io/en/latest/user_guide/" - "#fixing-conflicting-dependencies" + "https://pip.pypa.io/en/latest/topics/dependency-resolution/" + "#dealing-with-dependency-conflicts" ) diff --git a/env/Lib/site-packages/pip/_internal/resolution/resolvelib/found_candidates.py b/env/Lib/site-packages/pip/_internal/resolution/resolvelib/found_candidates.py index 21fa08ec938bd2ce0cda2c1bc25dfa1fb1f247e4..8663097b447cdd80c52e2b2abde33a4736ddb9c2 100644 --- a/env/Lib/site-packages/pip/_internal/resolution/resolvelib/found_candidates.py +++ b/env/Lib/site-packages/pip/_internal/resolution/resolvelib/found_candidates.py @@ -9,24 +9,38 @@ something. """ import functools -from typing import Callable, Iterator, Optional, Set, Tuple +from collections.abc import Sequence +from typing import TYPE_CHECKING, Any, Callable, Iterator, Optional, Set, Tuple from pip._vendor.packaging.version import _BaseVersion -from pip._vendor.six.moves import collections_abc # type: ignore from .base import Candidate IndexCandidateInfo = Tuple[_BaseVersion, Callable[[], Optional[Candidate]]] - -def _iter_built(infos): - # type: (Iterator[IndexCandidateInfo]) -> Iterator[Candidate] +if TYPE_CHECKING: + SequenceCandidate = Sequence[Candidate] +else: + # For compatibility: Python before 3.9 does not support using [] on the + # Sequence class. + # + # >>> from collections.abc import Sequence + # >>> Sequence[str] + # Traceback (most recent call last): + # File "<stdin>", line 1, in <module> + # TypeError: 'ABCMeta' object is not subscriptable + # + # TODO: Remove this block after dropping Python 3.8 support. + SequenceCandidate = Sequence + + +def _iter_built(infos: Iterator[IndexCandidateInfo]) -> Iterator[Candidate]: """Iterator for ``FoundCandidates``. This iterator is used when the package is not already installed. Candidates from index come later in their normal ordering. 
""" - versions_found = set() # type: Set[_BaseVersion] + versions_found: Set[_BaseVersion] = set() for version, func in infos: if version in versions_found: continue @@ -37,8 +51,9 @@ def _iter_built(infos): versions_found.add(version) -def _iter_built_with_prepended(installed, infos): - # type: (Candidate, Iterator[IndexCandidateInfo]) -> Iterator[Candidate] +def _iter_built_with_prepended( + installed: Candidate, infos: Iterator[IndexCandidateInfo] +) -> Iterator[Candidate]: """Iterator for ``FoundCandidates``. This iterator is used when the resolver prefers the already-installed @@ -47,7 +62,7 @@ def _iter_built_with_prepended(installed, infos): normal ordering, except skipped when the version is already installed. """ yield installed - versions_found = {installed.version} # type: Set[_BaseVersion] + versions_found: Set[_BaseVersion] = {installed.version} for version, func in infos: if version in versions_found: continue @@ -58,8 +73,9 @@ def _iter_built_with_prepended(installed, infos): versions_found.add(version) -def _iter_built_with_inserted(installed, infos): - # type: (Candidate, Iterator[IndexCandidateInfo]) -> Iterator[Candidate] +def _iter_built_with_inserted( + installed: Candidate, infos: Iterator[IndexCandidateInfo] +) -> Iterator[Candidate]: """Iterator for ``FoundCandidates``. This iterator is used when the resolver prefers to upgrade an @@ -70,7 +86,7 @@ def _iter_built_with_inserted(installed, infos): the installed candidate exactly once before we start yielding older or equivalent candidates, or after all other candidates if they are all newer. """ - versions_found = set() # type: Set[_BaseVersion] + versions_found: Set[_BaseVersion] = set() for version, func in infos: if version in versions_found: continue @@ -89,7 +105,7 @@ def _iter_built_with_inserted(installed, infos): yield installed -class FoundCandidates(collections_abc.Sequence): +class FoundCandidates(SequenceCandidate): """A lazy sequence to provide candidates to the resolver. The intended usage is to return this from `find_matches()` so the resolver @@ -110,15 +126,13 @@ class FoundCandidates(collections_abc.Sequence): self._prefers_installed = prefers_installed self._incompatible_ids = incompatible_ids - def __getitem__(self, index): - # type: (int) -> Candidate + def __getitem__(self, index: Any) -> Any: # Implemented to satisfy the ABC check. This is not needed by the # resolver, and should not be used by the provider either (for # performance reasons). raise NotImplementedError("don't do this") - def __iter__(self): - # type: () -> Iterator[Candidate] + def __iter__(self) -> Iterator[Candidate]: infos = self._get_infos() if not self._installed: iterator = _iter_built(infos) @@ -128,18 +142,14 @@ class FoundCandidates(collections_abc.Sequence): iterator = _iter_built_with_inserted(self._installed, infos) return (c for c in iterator if id(c) not in self._incompatible_ids) - def __len__(self): - # type: () -> int + def __len__(self) -> int: # Implemented to satisfy the ABC check. This is not needed by the # resolver, and should not be used by the provider either (for # performance reasons). raise NotImplementedError("don't do this") @functools.lru_cache(maxsize=1) - def __bool__(self): - # type: () -> bool + def __bool__(self) -> bool: if self._prefers_installed and self._installed: return True return any(self) - - __nonzero__ = __bool__ # XXX: Python 2. 
diff --git a/env/Lib/site-packages/pip/_internal/resolution/resolvelib/provider.py b/env/Lib/site-packages/pip/_internal/resolution/resolvelib/provider.py index 0be58fd3ba83b637250f18031cf0f9dd6cae6790..315fb9c8902c5e3f4dd8419ccdf7d85c6718096e 100644 --- a/env/Lib/site-packages/pip/_internal/resolution/resolvelib/provider.py +++ b/env/Lib/site-packages/pip/_internal/resolution/resolvelib/provider.py @@ -1,8 +1,20 @@ -from typing import TYPE_CHECKING, Dict, Iterable, Iterator, Mapping, Sequence, Union +import collections +import math +from typing import ( + TYPE_CHECKING, + Dict, + Iterable, + Iterator, + Mapping, + Sequence, + TypeVar, + Union, +) from pip._vendor.resolvelib.providers import AbstractProvider from .base import Candidate, Constraint, Requirement +from .candidates import REQUIRES_PYTHON_IDENTIFIER from .factory import Factory if TYPE_CHECKING: @@ -34,6 +46,35 @@ else: # services to those objects (access to pip's finder and preparer). +D = TypeVar("D") +V = TypeVar("V") + + +def _get_with_identifier( + mapping: Mapping[str, V], + identifier: str, + default: D, +) -> Union[D, V]: + """Get item from a package name lookup mapping with a resolver identifier. + + This extra logic is needed when the target mapping is keyed by package + name, which cannot be directly looked up with an identifier (which may + contain requested extras). Additional logic is added to also look up a value + by "cleaning up" the extras from the identifier. + """ + if identifier in mapping: + return mapping[identifier] + # HACK: Theoretically we should check whether this identifier is a valid + # "NAME[EXTRAS]" format, and parse out the name part with packaging or + # some regular expression. But since pip's resolver only spits out three + # kinds of identifiers: normalized PEP 503 names, normalized names plus + # extras, and Requires-Python, we can cheat a bit here. + name, open_bracket, _ = identifier.partition("[") + if open_bracket and name in mapping: + return mapping[name] + return default + + class PipProvider(_ProviderBase): """Pip's provider implementation for resolvelib. @@ -47,21 +88,20 @@ class PipProvider(_ProviderBase): def __init__( self, - factory, # type: Factory - constraints, # type: Dict[str, Constraint] - ignore_dependencies, # type: bool - upgrade_strategy, # type: str - user_requested, # type: Dict[str, int] - ): - # type: (...) -> None + factory: Factory, + constraints: Dict[str, Constraint], + ignore_dependencies: bool, + upgrade_strategy: str, + user_requested: Dict[str, int], + ) -> None: self._factory = factory self._constraints = constraints self._ignore_dependencies = ignore_dependencies self._upgrade_strategy = upgrade_strategy self._user_requested = user_requested + self._known_depths: Dict[str, float] = collections.defaultdict(lambda: math.inf) - def identify(self, requirement_or_candidate): - # type: (Union[Requirement, Candidate]) -> str + def identify(self, requirement_or_candidate: Union[Requirement, Candidate]) -> str: return requirement_or_candidate.name def get_preference( @@ -69,69 +109,91 @@ class PipProvider(_ProviderBase): identifier: str, resolutions: Mapping[str, Candidate], candidates: Mapping[str, Iterator[Candidate]], - information: Mapping[str, Iterator["PreferenceInformation"]], + information: Mapping[str, Iterable["PreferenceInformation"]], + backtrack_causes: Sequence["PreferenceInformation"], ) -> "Preference": """Produce a sort key for given requirement based on preference. The lower the return value is, the more preferred this group of arguments is. 
- Currently pip considers the followings in order: - - * Prefer if any of the known requirements points to an explicit URL. - * If equal, prefer if any requirements contain ``===`` and ``==``. - * If equal, prefer if requirements include version constraints, e.g. - ``>=`` and ``<``. - * If equal, prefer user-specified (non-transitive) requirements, and - order user-specified requirements by the order they are specified. + Currently pip considers the following in order: + + * Prefer if any of the known requirements is "direct", e.g. points to an + explicit URL. + * If equal, prefer if any requirement is "pinned", i.e. contains + operator ``===`` or ``==``. + * If equal, calculate an approximate "depth" and resolve requirements + closer to the user-specified requirements first. If the depth cannot + be determined (e.g. due to no matching parents), it is considered + infinite. + * Order user-specified requirements by the order they are specified. + * If equal, prefer "non-free" requirements, i.e. contains at least one + operator, such as ``>=`` or ``<``. * If equal, order alphabetically for consistency (helps debuggability). """ - - def _get_restrictive_rating(requirements): - # type: (Iterable[Requirement]) -> int - """Rate how restrictive a set of requirements are. - - ``Requirement.get_candidate_lookup()`` returns a 2-tuple for - lookup. The first element is ``Optional[Candidate]`` and the - second ``Optional[InstallRequirement]``. - - * If the requirement is an explicit one, the explicitly-required - candidate is returned as the first element. - * If the requirement is based on a PEP 508 specifier, the backing - ``InstallRequirement`` is returned as the second element. - - We use the first element to check whether there is an explicit - requirement, and the second for equality operator. - """ - lookups = (r.get_candidate_lookup() for r in requirements) - cands, ireqs = zip(*lookups) - if any(cand is not None for cand in cands): - return 0 - spec_sets = (ireq.specifier for ireq in ireqs if ireq) - operators = [ - specifier.operator for spec_set in spec_sets for specifier in spec_set - ] - if any(op in ("==", "===") for op in operators): - return 1 - if operators: - return 2 - # A "bare" requirement without any version requirements. - return 3 - - rating = _get_restrictive_rating(r for r, _ in information[identifier]) - order = self._user_requested.get(identifier, float("inf")) - - # HACK: Setuptools have a very long and solid backward compatibility - # track record, and extremely few projects would request a narrow, - # non-recent version range of it since that would break a lot things. - # (Most projects specify it only to request for an installer feature, - # which does not work, but that's another topic.) Intentionally - # delaying Setuptools helps reduce branches the resolver has to check. - # This serves as a temporary fix for issues like "apache-airlfow[all]" - # while we work on "proper" branch pruning techniques. - delay_this = identifier == "setuptools" - - return (delay_this, rating, order, identifier) + try: + next(iter(information[identifier])) + except StopIteration: + # There is no information for this identifier, so there's no known + # candidates.
+ has_information = False + else: + has_information = True + + if has_information: + lookups = (r.get_candidate_lookup() for r, _ in information[identifier]) + candidate, ireqs = zip(*lookups) + else: + candidate, ireqs = None, () + + operators = [ + specifier.operator + for specifier_set in (ireq.specifier for ireq in ireqs if ireq) + for specifier in specifier_set + ] + + direct = candidate is not None + pinned = any(op[:2] == "==" for op in operators) + unfree = bool(operators) + + try: + requested_order: Union[int, float] = self._user_requested[identifier] + except KeyError: + requested_order = math.inf + if has_information: + parent_depths = ( + self._known_depths[parent.name] if parent is not None else 0.0 + for _, parent in information[identifier] + ) + inferred_depth = min(d for d in parent_depths) + 1.0 + else: + inferred_depth = math.inf + else: + inferred_depth = 1.0 + self._known_depths[identifier] = inferred_depth + + requested_order = self._user_requested.get(identifier, math.inf) + + # Requires-Python has only one candidate and the check is basically + # free, so we always do it first to avoid needless work if it fails. + requires_python = identifier == REQUIRES_PYTHON_IDENTIFIER + + # Prefer the causes of backtracking on the assumption that the problem + # resolving the dependency tree is related to the failures that caused + # the backtracking + backtrack_cause = self.is_backtrack_cause(identifier, backtrack_causes) + + return ( + not requires_python, + not direct, + not pinned, + not backtrack_cause, + inferred_depth, + requested_order, + not unfree, + identifier, + ) def find_matches( self, @@ -139,8 +201,7 @@ class PipProvider(_ProviderBase): requirements: Mapping[str, Iterator[Requirement]], incompatibilities: Mapping[str, Iterator[Candidate]], ) -> Iterable[Candidate]: - def _eligible_for_upgrade(name): - # type: (str) -> bool + def _eligible_for_upgrade(identifier: str) -> bool: """Are upgrades allowed for this project? 
This checks the upgrade strategy, and whether the project was one @@ -154,22 +215,41 @@ class PipProvider(_ProviderBase): if self._upgrade_strategy == "eager": return True elif self._upgrade_strategy == "only-if-needed": - return name in self._user_requested + user_order = _get_with_identifier( + self._user_requested, + identifier, + default=None, + ) + return user_order is not None return False + constraint = _get_with_identifier( + self._constraints, + identifier, + default=Constraint.empty(), + ) return self._factory.find_candidates( identifier=identifier, requirements=requirements, - constraint=self._constraints.get(identifier, Constraint.empty()), + constraint=constraint, prefers_installed=(not _eligible_for_upgrade(identifier)), incompatibilities=incompatibilities, ) - def is_satisfied_by(self, requirement, candidate): - # type: (Requirement, Candidate) -> bool + def is_satisfied_by(self, requirement: Requirement, candidate: Candidate) -> bool: return requirement.is_satisfied_by(candidate) - def get_dependencies(self, candidate): - # type: (Candidate) -> Sequence[Requirement] + def get_dependencies(self, candidate: Candidate) -> Sequence[Requirement]: with_requires = not self._ignore_dependencies return [r for r in candidate.iter_dependencies(with_requires) if r is not None] + + @staticmethod + def is_backtrack_cause( + identifier: str, backtrack_causes: Sequence["PreferenceInformation"] + ) -> bool: + for backtrack_cause in backtrack_causes: + if identifier == backtrack_cause.requirement.name: + return True + if backtrack_cause.parent and identifier == backtrack_cause.parent.name: + return True + return False diff --git a/env/Lib/site-packages/pip/_internal/resolution/resolvelib/reporter.py b/env/Lib/site-packages/pip/_internal/resolution/resolvelib/reporter.py index 074583de0d985e7d6dce34b97d6d9aafefb34468..12adeff7b6eacafc9c8c655c8f6633622b646992 100644 --- a/env/Lib/site-packages/pip/_internal/resolution/resolvelib/reporter.py +++ b/env/Lib/site-packages/pip/_internal/resolution/resolvelib/reporter.py @@ -10,69 +10,71 @@ logger = getLogger(__name__) class PipReporter(BaseReporter): - def __init__(self): - # type: () -> None - self.backtracks_by_package = defaultdict(int) # type: DefaultDict[str, int] + def __init__(self) -> None: + self.reject_count_by_package: DefaultDict[str, int] = defaultdict(int) - self._messages_at_backtrack = { + self._messages_at_reject_count = { 1: ( "pip is looking at multiple versions of {package_name} to " "determine which version is compatible with other " "requirements. This could take a while." ), 8: ( - "pip is looking at multiple versions of {package_name} to " + "pip is still looking at multiple versions of {package_name} to " "determine which version is compatible with other " "requirements. This could take a while." ), 13: ( "This is taking longer than usual. You might need to provide " "the dependency resolver with stricter constraints to reduce " - "runtime. If you want to abort this run, you can press " - "Ctrl + C to do so. To improve how pip performs, tell us what " - "happened here: https://pip.pypa.io/surveys/backtracking" + "runtime. See https://pip.pypa.io/warnings/backtracking for " + "guidance. If you want to abort this run, press Ctrl + C." 
), } - def backtracking(self, candidate): - # type: (Candidate) -> None - self.backtracks_by_package[candidate.name] += 1 + def rejecting_candidate(self, criterion: Any, candidate: Candidate) -> None: + self.reject_count_by_package[candidate.name] += 1 - count = self.backtracks_by_package[candidate.name] - if count not in self._messages_at_backtrack: + count = self.reject_count_by_package[candidate.name] + if count not in self._messages_at_reject_count: return - message = self._messages_at_backtrack[count] + message = self._messages_at_reject_count[count] logger.info("INFO: %s", message.format(package_name=candidate.name)) + msg = "Will try a different candidate, due to conflict:" + for req_info in criterion.information: + req, parent = req_info.requirement, req_info.parent + # Inspired by Factory.get_installation_error + msg += "\n " + if parent: + msg += f"{parent.name} {parent.version} depends on " + else: + msg += "The user requested " + msg += req.format_for_error() + logger.debug(msg) + class PipDebuggingReporter(BaseReporter): """A reporter that does an info log for every event it sees.""" - def starting(self): - # type: () -> None + def starting(self) -> None: logger.info("Reporter.starting()") - def starting_round(self, index): - # type: (int) -> None + def starting_round(self, index: int) -> None: logger.info("Reporter.starting_round(%r)", index) - def ending_round(self, index, state): - # type: (int, Any) -> None + def ending_round(self, index: int, state: Any) -> None: logger.info("Reporter.ending_round(%r, state)", index) - def ending(self, state): - # type: (Any) -> None + def ending(self, state: Any) -> None: logger.info("Reporter.ending(%r)", state) - def adding_requirement(self, requirement, parent): - # type: (Requirement, Candidate) -> None + def adding_requirement(self, requirement: Requirement, parent: Candidate) -> None: logger.info("Reporter.adding_requirement(%r, %r)", requirement, parent) - def backtracking(self, candidate): - # type: (Candidate) -> None - logger.info("Reporter.backtracking(%r)", candidate) + def rejecting_candidate(self, criterion: Any, candidate: Candidate) -> None: + logger.info("Reporter.rejecting_candidate(%r, %r)", criterion, candidate) - def pinning(self, candidate): - # type: (Candidate) -> None + def pinning(self, candidate: Candidate) -> None: logger.info("Reporter.pinning(%r)", candidate) diff --git a/env/Lib/site-packages/pip/_internal/resolution/resolvelib/requirements.py b/env/Lib/site-packages/pip/_internal/resolution/resolvelib/requirements.py index a7fcdd1e345e466c24b043fc2e7fe62ad072d528..06addc0ddce8d1fd1df15b26f8b45221a44737b6 100644 --- a/env/Lib/site-packages/pip/_internal/resolution/resolvelib/requirements.py +++ b/env/Lib/site-packages/pip/_internal/resolution/resolvelib/requirements.py @@ -7,78 +7,63 @@ from .base import Candidate, CandidateLookup, Requirement, format_name class ExplicitRequirement(Requirement): - def __init__(self, candidate): - # type: (Candidate) -> None + def __init__(self, candidate: Candidate) -> None: self.candidate = candidate - def __str__(self): - # type: () -> str + def __str__(self) -> str: return str(self.candidate) - def __repr__(self): - # type: () -> str + def __repr__(self) -> str: return "{class_name}({candidate!r})".format( class_name=self.__class__.__name__, candidate=self.candidate, ) @property - def project_name(self): - # type: () -> NormalizedName - # No need to canonicalise - the candidate did this + def project_name(self) -> NormalizedName: + # No need to canonicalize - the 
candidate did this return self.candidate.project_name @property - def name(self): - # type: () -> str - # No need to canonicalise - the candidate did this + def name(self) -> str: + # No need to canonicalize - the candidate did this return self.candidate.name - def format_for_error(self): - # type: () -> str + def format_for_error(self) -> str: return self.candidate.format_for_error() - def get_candidate_lookup(self): - # type: () -> CandidateLookup + def get_candidate_lookup(self) -> CandidateLookup: return self.candidate, None - def is_satisfied_by(self, candidate): - # type: (Candidate) -> bool + def is_satisfied_by(self, candidate: Candidate) -> bool: return candidate == self.candidate class SpecifierRequirement(Requirement): - def __init__(self, ireq): - # type: (InstallRequirement) -> None + def __init__(self, ireq: InstallRequirement) -> None: assert ireq.link is None, "This is a link, not a specifier" self._ireq = ireq self._extras = frozenset(ireq.extras) - def __str__(self): - # type: () -> str + def __str__(self) -> str: return str(self._ireq.req) - def __repr__(self): - # type: () -> str + def __repr__(self) -> str: return "{class_name}({requirement!r})".format( class_name=self.__class__.__name__, requirement=str(self._ireq.req), ) @property - def project_name(self): - # type: () -> NormalizedName + def project_name(self) -> NormalizedName: assert self._ireq.req, "Specifier-backed ireq is always PEP 508" return canonicalize_name(self._ireq.req.name) @property - def name(self): - # type: () -> str + def name(self) -> str: return format_name(self.project_name, self._extras) - def format_for_error(self): - # type: () -> str - + def format_for_error(self) -> str: # Convert comma-separated specifiers into "A, B, ..., F and G" # This makes the specifier a bit more "human readable", without # risking a change in meaning. (Hopefully! 
Not all edge cases have @@ -91,12 +76,10 @@ class SpecifierRequirement(Requirement): return ", ".join(parts[:-1]) + " and " + parts[-1] - def get_candidate_lookup(self): - # type: () -> CandidateLookup + def get_candidate_lookup(self) -> CandidateLookup: return None, self._ireq - def is_satisfied_by(self, candidate): - # type: (Candidate) -> bool + def is_satisfied_by(self, candidate: Candidate) -> bool: assert candidate.name == self.name, ( f"Internal issue: Candidate is not for this requirement " f"{candidate.name} vs {self.name}" @@ -112,44 +95,36 @@ class SpecifierRequirement(Requirement): class RequiresPythonRequirement(Requirement): """A requirement representing Requires-Python metadata.""" - def __init__(self, specifier, match): - # type: (SpecifierSet, Candidate) -> None + def __init__(self, specifier: SpecifierSet, match: Candidate) -> None: self.specifier = specifier self._candidate = match - def __str__(self): - # type: () -> str + def __str__(self) -> str: return f"Python {self.specifier}" - def __repr__(self): - # type: () -> str + def __repr__(self) -> str: return "{class_name}({specifier!r})".format( class_name=self.__class__.__name__, specifier=str(self.specifier), ) @property - def project_name(self): - # type: () -> NormalizedName + def project_name(self) -> NormalizedName: return self._candidate.project_name @property - def name(self): - # type: () -> str + def name(self) -> str: return self._candidate.name - def format_for_error(self): - # type: () -> str + def format_for_error(self) -> str: return str(self) - def get_candidate_lookup(self): - # type: () -> CandidateLookup + def get_candidate_lookup(self) -> CandidateLookup: if self.specifier.contains(self._candidate.version, prereleases=True): return self._candidate, None return None, None - def is_satisfied_by(self, candidate): - # type: (Candidate) -> bool + def is_satisfied_by(self, candidate: Candidate) -> bool: assert candidate.name == self._candidate.name, "Not Python candidate" # We can safely always allow prereleases here since PackageFinder # already implements the prerelease logic, and would have filtered out @@ -160,39 +135,31 @@ class RequiresPythonRequirement(Requirement): class UnsatisfiableRequirement(Requirement): """A requirement that cannot be satisfied.""" - def __init__(self, name): - # type: (NormalizedName) -> None + def __init__(self, name: NormalizedName) -> None: self._name = name - def __str__(self): - # type: () -> str + def __str__(self) -> str: return f"{self._name} (unavailable)" - def __repr__(self): - # type: () -> str + def __repr__(self) -> str: return "{class_name}({name!r})".format( class_name=self.__class__.__name__, name=str(self._name), ) @property - def project_name(self): - # type: () -> NormalizedName + def project_name(self) -> NormalizedName: return self._name @property - def name(self): - # type: () -> str + def name(self) -> str: return self._name - def format_for_error(self): - # type: () -> str + def format_for_error(self) -> str: return str(self) - def get_candidate_lookup(self): - # type: () -> CandidateLookup + def get_candidate_lookup(self) -> CandidateLookup: return None, None - def is_satisfied_by(self, candidate): - # type: (Candidate) -> bool + def is_satisfied_by(self, candidate: Candidate) -> bool: return False diff --git a/env/Lib/site-packages/pip/_internal/resolution/resolvelib/resolver.py b/env/Lib/site-packages/pip/_internal/resolution/resolvelib/resolver.py index b90f82cfa262ce796317e3aff1de67a0f3b2aaa1..d5b238608b2af459e3db803edbe1b23a7955df7b 100644 --- 
a/env/Lib/site-packages/pip/_internal/resolution/resolvelib/resolver.py +++ b/env/Lib/site-packages/pip/_internal/resolution/resolvelib/resolver.py @@ -4,19 +4,14 @@ import os from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, cast from pip._vendor.packaging.utils import canonicalize_name -from pip._vendor.packaging.version import parse as parse_version from pip._vendor.resolvelib import BaseReporter, ResolutionImpossible from pip._vendor.resolvelib import Resolver as RLResolver from pip._vendor.resolvelib.structs import DirectedGraph from pip._internal.cache import WheelCache -from pip._internal.exceptions import InstallationError from pip._internal.index.package_finder import PackageFinder from pip._internal.operations.prepare import RequirementPreparer -from pip._internal.req.req_install import ( - InstallRequirement, - check_invalid_constraint_type, -) +from pip._internal.req.req_install import InstallRequirement from pip._internal.req.req_set import RequirementSet from pip._internal.resolution.base import BaseResolver, InstallRequirementProvider from pip._internal.resolution.resolvelib.provider import PipProvider @@ -24,11 +19,8 @@ from pip._internal.resolution.resolvelib.reporter import ( PipDebuggingReporter, PipReporter, ) -from pip._internal.utils.deprecation import deprecated -from pip._internal.utils.filetypes import is_archive_file -from pip._internal.utils.misc import dist_is_editable -from .base import Candidate, Constraint, Requirement +from .base import Candidate, Requirement from .factory import Factory if TYPE_CHECKING: @@ -45,17 +37,17 @@ class Resolver(BaseResolver): def __init__( self, - preparer, # type: RequirementPreparer - finder, # type: PackageFinder - wheel_cache, # type: Optional[WheelCache] - make_install_req, # type: InstallRequirementProvider - use_user_site, # type: bool - ignore_dependencies, # type: bool - ignore_installed, # type: bool - ignore_requires_python, # type: bool - force_reinstall, # type: bool - upgrade_strategy, # type: str - py_version_info=None, # type: Optional[Tuple[int, ...]] + preparer: RequirementPreparer, + finder: PackageFinder, + wheel_cache: Optional[WheelCache], + make_install_req: InstallRequirementProvider, + use_user_site: bool, + ignore_dependencies: bool, + ignore_installed: bool, + ignore_requires_python: bool, + force_reinstall: bool, + upgrade_strategy: str, + py_version_info: Optional[Tuple[int, ...]] = None, ): super().__init__() assert upgrade_strategy in self._allowed_strategies @@ -73,65 +65,38 @@ class Resolver(BaseResolver): ) self.ignore_dependencies = ignore_dependencies self.upgrade_strategy = upgrade_strategy - self._result = None # type: Optional[Result] - - def resolve(self, root_reqs, check_supported_wheels): - # type: (List[InstallRequirement], bool) -> RequirementSet - - constraints = {} # type: Dict[str, Constraint] - user_requested = {} # type: Dict[str, int] - requirements = [] - for i, req in enumerate(root_reqs): - if req.constraint: - # Ensure we only accept valid constraints - problem = check_invalid_constraint_type(req) - if problem: - raise InstallationError(problem) - if not req.match_markers(): - continue - assert req.name, "Constraint must be named" - name = canonicalize_name(req.name) - if name in constraints: - constraints[name] &= req - else: - constraints[name] = Constraint.from_ireq(req) - else: - if req.user_supplied and req.name: - canonical_name = canonicalize_name(req.name) - if canonical_name not in user_requested: - user_requested[canonical_name] = i - r = 
self.factory.make_requirement_from_install_req( - req, requested_extras=() - ) - if r is not None: - requirements.append(r) + self._result: Optional[Result] = None + def resolve( + self, root_reqs: List[InstallRequirement], check_supported_wheels: bool + ) -> RequirementSet: + collected = self.factory.collect_root_requirements(root_reqs) provider = PipProvider( factory=self.factory, - constraints=constraints, + constraints=collected.constraints, ignore_dependencies=self.ignore_dependencies, upgrade_strategy=self.upgrade_strategy, - user_requested=user_requested, + user_requested=collected.user_requested, ) if "PIP_RESOLVER_DEBUG" in os.environ: - reporter = PipDebuggingReporter() # type: BaseReporter + reporter: BaseReporter = PipDebuggingReporter() else: reporter = PipReporter() - resolver = RLResolver( + resolver: RLResolver[Requirement, Candidate, str] = RLResolver( provider, reporter, - ) # type: RLResolver[Requirement, Candidate, str] + ) try: - try_to_avoid_resolution_too_deep = 2000000 + limit_how_complex_resolution_can_be = 200000 result = self._result = resolver.resolve( - requirements, max_rounds=try_to_avoid_resolution_too_deep + collected.requirements, max_rounds=limit_how_complex_resolution_can_be ) except ResolutionImpossible as e: error = self.factory.get_installation_error( cast("ResolutionImpossible[Requirement, Candidate]", e), - constraints, + collected.constraints, ) raise error from e @@ -150,10 +115,10 @@ class Resolver(BaseResolver): elif self.factory.force_reinstall: # The --force-reinstall flag is set -- reinstall. ireq.should_reinstall = True - elif parse_version(installed_dist.version) != candidate.version: + elif installed_dist.version != candidate.version: # The installation is different in version -- reinstall. ireq.should_reinstall = True - elif candidate.is_editable or dist_is_editable(installed_dist): + elif candidate.is_editable or installed_dist.editable: # The incoming distribution is editable, or different in # editable-ness to installation -- reinstall. ireq.should_reinstall = True @@ -169,25 +134,6 @@ class Resolver(BaseResolver): ) continue - looks_like_sdist = ( - is_archive_file(candidate.source_link.file_path) - and candidate.source_link.ext != ".zip" - ) - if looks_like_sdist: - # is a local sdist -- show a deprecation warning! - reason = ( - "Source distribution is being reinstalled despite an " - "installed package having the same name and version as " - "the installed package." - ) - replacement = "use --force-reinstall" - deprecated( - reason=reason, - replacement=replacement, - gone_in="21.2", - issue=8711, - ) - # is a local sdist or path -- reinstall ireq.should_reinstall = True else: @@ -213,10 +159,14 @@ class Resolver(BaseResolver): reqs = req_set.all_requirements self.factory.preparer.prepare_linked_requirements_more(reqs) + for req in reqs: + req.prepared = True + req.needs_more_preparation = False return req_set - def get_installation_order(self, req_set): - # type: (RequirementSet) -> List[InstallRequirement] + def get_installation_order( + self, req_set: RequirementSet + ) -> List[InstallRequirement]: """Get order for installation of requirements in RequirementSet. The returned list contains a requirement before another that depends on @@ -224,17 +174,19 @@ class Resolver(BaseResolver): get installed one-by-one. The current implementation creates a topological ordering of the - dependency graph, while breaking any cycles in the graph at arbitrary - points. 
We make no guarantees about where the cycle would be broken, - other than they would be broken. + dependency graph, giving more weight to packages with fewer + or no dependencies, while breaking any cycles in the graph at + arbitrary points. We make no guarantees about where the cycle + would be broken, other than it *would* be broken. """ assert self._result is not None, "must call resolve() first" + if not req_set.requirements: + # Nothing is left to install, so we do not need an order. + return [] + graph = self._result.graph - weights = get_topological_weights( - graph, - expected_node_count=len(self._result.mapping) + 1, - ) + weights = get_topological_weights(graph, set(req_set.requirements.keys())) sorted_items = sorted( req_set.requirements.items(), @@ -244,29 +196,38 @@ return [ireq for _, ireq in sorted_items] -def get_topological_weights(graph, expected_node_count): - # type: (DirectedGraph[Optional[str]], int) -> Dict[Optional[str], int] +def get_topological_weights( + graph: "DirectedGraph[Optional[str]]", requirement_keys: Set[str] +) -> Dict[Optional[str], int]: """Assign weights to each node based on how "deep" they are. This implementation may change at any point in the future without prior notice. - We take the length for the longest path to any node from root, ignoring any - paths that contain a single node twice (i.e. cycles). This is done through - a depth-first search through the graph, while keeping track of the path to - the node. + We first simplify the dependency graph by pruning any leaves and giving them + the highest weight: a package without any dependencies should be installed + first. This is done again and again in the same way, giving ever less weight + to the newly found leaves. The loop stops when no leaves are left: all + remaining packages have at least one dependency left in the graph. + + Then we continue with the remaining graph, by taking the length for the + longest path to any node from root, ignoring any paths that contain a single + node twice (i.e. cycles). This is done through a depth-first search through + the graph, while keeping track of the path to the node. Cycles in the graph would result in a node being revisited while also - being it's own path. In this case, take no action. This helps ensure we + being on its own path. In this case, take no action. This helps ensure we don't get stuck in a cycle. When assigning weight, the longer path (i.e. larger length) is preferred. + + We are only interested in the weights of packages that are in the + requirement_keys. """ - path = set() # type: Set[Optional[str]] - weights = {} # type: Dict[Optional[str], int] + path: Set[Optional[str]] = set() + weights: Dict[Optional[str], int] = {} - def visit(node): - # type: (Optional[str]) -> None + def visit(node: Optional[str]) -> None: if node in path: # We hit a cycle, so we'll break it here. return @@ -277,24 +238,57 @@ def get_topological_weights(graph, expected_node_count): visit(child) path.remove(node) + if node not in requirement_keys: + return + last_known_parent_count = weights.get(node, 0) weights[node] = max(last_known_parent_count, len(path)) + # Simplify the graph, pruning leaves that have no dependencies. + # This is needed for large graphs (say over 200 packages) because the + # `visit` function is exponentially slower in that case, taking minutes. + # See https://github.com/pypa/pip/issues/10557 + # We will loop until we explicitly break the loop.
+ while True: + leaves = set() + for key in graph: + if key is None: + continue + for _child in graph.iter_children(key): + # This means we have at least one child + break + else: + # No child. + leaves.add(key) + if not leaves: + # We are done simplifying. + break + # Calculate the weight for the leaves. + weight = len(graph) - 1 + for leaf in leaves: + if leaf not in requirement_keys: + continue + weights[leaf] = weight + # Remove the leaves from the graph, making it simpler. + for leaf in leaves: + graph.remove(leaf) + + # Visit the remaining graph. # `None` is guaranteed to be the root node by resolvelib. visit(None) - # Sanity checks - assert weights[None] == 0 - assert len(weights) == expected_node_count + # Sanity check: all requirement keys should be in the weights, + # and no other keys should be in the weights. + difference = set(weights.keys()).difference(requirement_keys) + assert not difference, difference return weights def _req_set_item_sorter( - item, # type: Tuple[str, InstallRequirement] - weights, # type: Dict[Optional[str], int] -): - # type: (...) -> Tuple[int, str] + item: Tuple[str, InstallRequirement], + weights: Dict[Optional[str], int], +) -> Tuple[int, str]: """Key function used to sort install requirements for installation. Based on the "weight" mapping calculated in ``get_installation_order()``. diff --git a/env/Lib/site-packages/pip/_internal/self_outdated_check.py b/env/Lib/site-packages/pip/_internal/self_outdated_check.py index 6b24965b8029f7b384be4e9ec3dbc12abe17df72..41cc42c5677ddf0709d9eeb894eb8dbe4fd16f91 100644 --- a/env/Lib/site-packages/pip/_internal/self_outdated_check.py +++ b/env/Lib/site-packages/pip/_internal/self_outdated_check.py @@ -1,97 +1,149 @@ import datetime +import functools import hashlib import json import logging import optparse import os.path import sys -from typing import Any, Dict +from dataclasses import dataclass +from typing import Any, Callable, Dict, Optional from pip._vendor.packaging.version import parse as parse_version +from pip._vendor.rich.console import Group +from pip._vendor.rich.markup import escape +from pip._vendor.rich.text import Text from pip._internal.index.collector import LinkCollector from pip._internal.index.package_finder import PackageFinder from pip._internal.metadata import get_default_environment +from pip._internal.metadata.base import DistributionVersion from pip._internal.models.selection_prefs import SelectionPreferences from pip._internal.network.session import PipSession +from pip._internal.utils.compat import WINDOWS +from pip._internal.utils.entrypoints import ( + get_best_invocation_for_this_pip, + get_best_invocation_for_this_python, +) from pip._internal.utils.filesystem import adjacent_tmp_file, check_path_owner, replace from pip._internal.utils.misc import ensure_dir -SELFCHECK_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ" +_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ" logger = logging.getLogger(__name__) -def _get_statefile_name(key): - # type: (str) -> str +def _get_statefile_name(key: str) -> str: key_bytes = key.encode() name = hashlib.sha224(key_bytes).hexdigest() return name class SelfCheckState: - def __init__(self, cache_dir): - # type: (str) -> None - self.state = {} # type: Dict[str, Any] - self.statefile_path = None + def __init__(self, cache_dir: str) -> None: + self._state: Dict[str, Any] = {} + self._statefile_path = None # Try to load the existing state if cache_dir: - self.statefile_path = os.path.join( + self._statefile_path = os.path.join( cache_dir, "selfcheck", _get_statefile_name(self.key) ) 
try: - with open(self.statefile_path, encoding="utf-8") as statefile: - self.state = json.load(statefile) + with open(self._statefile_path, encoding="utf-8") as statefile: + self._state = json.load(statefile) except (OSError, ValueError, KeyError): # Explicitly suppressing exceptions, since we don't want to # error out if the cache file is invalid. pass @property - def key(self): - # type: () -> str + def key(self) -> str: return sys.prefix - def save(self, pypi_version, current_time): - # type: (str, datetime.datetime) -> None + def get(self, current_time: datetime.datetime) -> Optional[str]: + """Check if we have a not-outdated version loaded already.""" + if not self._state: + return None + + if "last_check" not in self._state: + return None + + if "pypi_version" not in self._state: + return None + + seven_days_in_seconds = 7 * 24 * 60 * 60 + + # Determine if we need to refresh the state + last_check = datetime.datetime.strptime(self._state["last_check"], _DATE_FMT) + seconds_since_last_check = (current_time - last_check).total_seconds() + if seconds_since_last_check > seven_days_in_seconds: + return None + + return self._state["pypi_version"] + + def set(self, pypi_version: str, current_time: datetime.datetime) -> None: # If we do not have a path to cache in, don't bother saving. - if not self.statefile_path: + if not self._statefile_path: return # Check to make sure that we own the directory - if not check_path_owner(os.path.dirname(self.statefile_path)): + if not check_path_owner(os.path.dirname(self._statefile_path)): return # Now that we've ensured the directory is owned by this user, we'll go # ahead and make sure that all our directories are created. - ensure_dir(os.path.dirname(self.statefile_path)) + ensure_dir(os.path.dirname(self._statefile_path)) state = { # Include the key so it's easy to tell which pip wrote the # file. "key": self.key, - "last_check": current_time.strftime(SELFCHECK_DATE_FMT), + "last_check": current_time.strftime(_DATE_FMT), "pypi_version": pypi_version, } text = json.dumps(state, sort_keys=True, separators=(",", ":")) - with adjacent_tmp_file(self.statefile_path) as f: + with adjacent_tmp_file(self._statefile_path) as f: f.write(text.encode()) try: # Since we have a prefix-specific state file, we can just # overwrite whatever is there, no need to check. - replace(f.name, self.statefile_path) + replace(f.name, self._statefile_path) except OSError: # Best effort. 
pass -def was_installed_by_pip(pkg): - # type: (str) -> bool +@dataclass +class UpgradePrompt: + old: str + new: str + + def __rich__(self) -> Group: + if WINDOWS: + pip_cmd = f"{get_best_invocation_for_this_python()} -m pip" + else: + pip_cmd = get_best_invocation_for_this_pip() + + notice = "[bold][[reset][blue]notice[reset][bold]][reset]" + return Group( + Text(), + Text.from_markup( + f"{notice} A new release of pip is available: " + f"[red]{self.old}[reset] -> [green]{self.new}[reset]" + ), + Text.from_markup( + f"{notice} To update, run: " + f"[green]{escape(pip_cmd)} install --upgrade pip" + ), + ) + + +def was_installed_by_pip(pkg: str) -> bool: """Checks whether pkg was installed by pip This is used to avoid displaying the upgrade message when pip is in fact @@ -101,8 +153,69 @@ def was_installed_by_pip(pkg): return dist is not None and "pip" == dist.installer -def pip_self_version_check(session, options): - # type: (PipSession, optparse.Values) -> None +def _get_current_remote_pip_version( + session: PipSession, options: optparse.Values +) -> Optional[str]: + # Let's use PackageFinder to see what the latest pip version is + link_collector = LinkCollector.create( + session, + options=options, + suppress_no_index=True, + ) + + # Pass allow_yanked=False so we don't suggest upgrading to a + # yanked version. + selection_prefs = SelectionPreferences( + allow_yanked=False, + allow_all_prereleases=False, # Explicitly set to False + ) + + finder = PackageFinder.create( + link_collector=link_collector, + selection_prefs=selection_prefs, + ) + best_candidate = finder.find_best_candidate("pip").best_candidate + if best_candidate is None: + return None + + return str(best_candidate.version) + + +def _self_version_check_logic( + *, + state: SelfCheckState, + current_time: datetime.datetime, + local_version: DistributionVersion, + get_remote_version: Callable[[], Optional[str]], +) -> Optional[UpgradePrompt]: + remote_version_str = state.get(current_time) + if remote_version_str is None: + remote_version_str = get_remote_version() + if remote_version_str is None: + logger.debug("No remote pip version found") + return None + state.set(remote_version_str, current_time) + + remote_version = parse_version(remote_version_str) + logger.debug("Remote version of pip: %s", remote_version) + logger.debug("Local version of pip: %s", local_version) + + pip_installed_by_pip = was_installed_by_pip("pip") + logger.debug("Was pip installed by pip? %s", pip_installed_by_pip) + if not pip_installed_by_pip: + return None # Only suggest upgrade if pip is installed by pip. + + local_version_is_older = ( + local_version < remote_version + and local_version.base_version != remote_version.base_version + ) + if local_version_is_older: + return UpgradePrompt(old=str(local_version), new=remote_version_str) + + return None + + +def pip_self_version_check(session: PipSession, options: optparse.Values) -> None: """Check for an update for pip. Limit the frequency of checks to once per week.
State is stored either in @@ -113,75 +226,17 @@ def pip_self_version_check(session, options): if not installed_dist: return - pip_version = installed_dist.version - pypi_version = None - try: - state = SelfCheckState(cache_dir=options.cache_dir) - - current_time = datetime.datetime.utcnow() - # Determine if we need to refresh the state - if "last_check" in state.state and "pypi_version" in state.state: - last_check = datetime.datetime.strptime( - state.state["last_check"], - SELFCHECK_DATE_FMT - ) - if (current_time - last_check).total_seconds() < 7 * 24 * 60 * 60: - pypi_version = state.state["pypi_version"] - - # Refresh the version if we need to or just see if we need to warn - if pypi_version is None: - # Lets use PackageFinder to see what the latest pip version is - link_collector = LinkCollector.create( - session, - options=options, - suppress_no_index=True, - ) - - # Pass allow_yanked=False so we don't suggest upgrading to a - # yanked version. - selection_prefs = SelectionPreferences( - allow_yanked=False, - allow_all_prereleases=False, # Explicitly set to False - ) - - finder = PackageFinder.create( - link_collector=link_collector, - selection_prefs=selection_prefs, - ) - best_candidate = finder.find_best_candidate("pip").best_candidate - if best_candidate is None: - return - pypi_version = str(best_candidate.version) - - # save that we've performed a check - state.save(pypi_version, current_time) - - remote_version = parse_version(pypi_version) - - local_version_is_older = ( - pip_version < remote_version and - pip_version.base_version != remote_version.base_version and - was_installed_by_pip('pip') - ) - - # Determine if our pypi_version is older - if not local_version_is_older: - return - - # We cannot tell how the current pip is available in the current - # command context, so be pragmatic here and suggest the command - # that's always available. This does not accommodate spaces in - # `sys.executable`. 
- pip_cmd = f"{sys.executable} -m pip" - logger.warning( - "You are using pip version %s; however, version %s is " - "available.\nYou should consider upgrading via the " - "'%s install --upgrade pip' command.", - pip_version, pypi_version, pip_cmd + upgrade_prompt = _self_version_check_logic( + state=SelfCheckState(cache_dir=options.cache_dir), + current_time=datetime.datetime.utcnow(), + local_version=installed_dist.version, + get_remote_version=functools.partial( + _get_current_remote_pip_version, session, options + ), ) + if upgrade_prompt is not None: + logger.warning("[present-rich] %s", upgrade_prompt) except Exception: - logger.debug( - "There was an error checking the latest version of pip", - exc_info=True, - ) + logger.warning("There was an error checking the latest version of pip.") + logger.debug("See below for error", exc_info=True) diff --git a/env/Lib/site-packages/pip/_internal/utils/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/utils/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 2997301191e1b1b23f4222c74ddc7258de73b129..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/utils/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/utils/__pycache__/appdirs.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/utils/__pycache__/appdirs.cpython-39.pyc deleted file mode 100644 index fa92e190af6935c1655f39a0ffe775e7213df4c5..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/utils/__pycache__/appdirs.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/utils/__pycache__/compat.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/utils/__pycache__/compat.cpython-39.pyc deleted file mode 100644 index 00817bf20d6db4842b9ce3ba876a0945676173be..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/utils/__pycache__/compat.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/utils/__pycache__/compatibility_tags.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/utils/__pycache__/compatibility_tags.cpython-39.pyc deleted file mode 100644 index e8b73ef46e18441ca00f37a2447dde87cd82b284..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/utils/__pycache__/compatibility_tags.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/utils/__pycache__/datetime.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/utils/__pycache__/datetime.cpython-39.pyc deleted file mode 100644 index 5ae87dd241b31dc256f46cf157006f7374700288..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/utils/__pycache__/datetime.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/utils/__pycache__/deprecation.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/utils/__pycache__/deprecation.cpython-39.pyc deleted file mode 100644 index f87f203a1a780bafa8cdf38ab185f83a93683611..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/utils/__pycache__/deprecation.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/utils/__pycache__/direct_url_helpers.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/utils/__pycache__/direct_url_helpers.cpython-39.pyc deleted file mode 100644 index 
d624dbad272b2a43a6082a5bbef9d23c32568e4a..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/utils/__pycache__/direct_url_helpers.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/utils/__pycache__/distutils_args.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/utils/__pycache__/distutils_args.cpython-39.pyc deleted file mode 100644 index 22d93b2e905cf9ed1b6af5e7835d5f69efdfd641..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/utils/__pycache__/distutils_args.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/utils/__pycache__/encoding.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/utils/__pycache__/encoding.cpython-39.pyc deleted file mode 100644 index 5f905df6bc2cc5f1a6aca28c3698c201a885b9fa..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/utils/__pycache__/encoding.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/utils/__pycache__/entrypoints.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/utils/__pycache__/entrypoints.cpython-39.pyc deleted file mode 100644 index 07ab05c6c17ce26a339cb64faf1c02702f886321..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/utils/__pycache__/entrypoints.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/utils/__pycache__/filesystem.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/utils/__pycache__/filesystem.cpython-39.pyc deleted file mode 100644 index 0ab897ae70df52b983fb6e87b317b6445830dcd5..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/utils/__pycache__/filesystem.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/utils/__pycache__/filetypes.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/utils/__pycache__/filetypes.cpython-39.pyc deleted file mode 100644 index ff347931b644fbe420976179505f5db66e2c23ce..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/utils/__pycache__/filetypes.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/utils/__pycache__/glibc.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/utils/__pycache__/glibc.cpython-39.pyc deleted file mode 100644 index 0a28d4e950dc2685cac33be4673a963912f0001e..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/utils/__pycache__/glibc.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/utils/__pycache__/hashes.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/utils/__pycache__/hashes.cpython-39.pyc deleted file mode 100644 index 3726aca5e49fe4ee8a7d88b91f16c686416a02d1..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/utils/__pycache__/hashes.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/utils/__pycache__/inject_securetransport.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/utils/__pycache__/inject_securetransport.cpython-39.pyc deleted file mode 100644 index e405e417c5547dc3244fcd9e8dfd977847221041..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/utils/__pycache__/inject_securetransport.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/utils/__pycache__/logging.cpython-39.pyc 
b/env/Lib/site-packages/pip/_internal/utils/__pycache__/logging.cpython-39.pyc deleted file mode 100644 index 275dd1841a21a89dadec9ec5c1bd5fe84a62fc85..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/utils/__pycache__/logging.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/utils/__pycache__/misc.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/utils/__pycache__/misc.cpython-39.pyc deleted file mode 100644 index d1d9cdd1045a77c3c7b91383a41f1d60575fea34..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/utils/__pycache__/misc.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/utils/__pycache__/models.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/utils/__pycache__/models.cpython-39.pyc deleted file mode 100644 index 7e5658947d4e56549e61b81f2f7b8fcd5e0c8104..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/utils/__pycache__/models.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/utils/__pycache__/packaging.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/utils/__pycache__/packaging.cpython-39.pyc deleted file mode 100644 index 41412e51b9b6baac7b12fd70ad64f779ecf6b91f..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/utils/__pycache__/packaging.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/utils/__pycache__/parallel.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/utils/__pycache__/parallel.cpython-39.pyc deleted file mode 100644 index 5ca6316e8195e4b8d041e10039cbaf0c7b5fba0b..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/utils/__pycache__/parallel.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/utils/__pycache__/pkg_resources.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/utils/__pycache__/pkg_resources.cpython-39.pyc deleted file mode 100644 index aab84fd75089bce6cde33da7db8cc0353125457d..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/utils/__pycache__/pkg_resources.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/utils/__pycache__/setuptools_build.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/utils/__pycache__/setuptools_build.cpython-39.pyc deleted file mode 100644 index 365c4f76fac72090814c87536d41df6dc9fed473..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/utils/__pycache__/setuptools_build.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/utils/__pycache__/subprocess.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/utils/__pycache__/subprocess.cpython-39.pyc deleted file mode 100644 index 2c6dc2f6699ca24d2cdb0ee868bed92452d0451a..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/utils/__pycache__/subprocess.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/utils/__pycache__/temp_dir.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/utils/__pycache__/temp_dir.cpython-39.pyc deleted file mode 100644 index 16fa511a9a099511408c14826654b5c322810b34..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/utils/__pycache__/temp_dir.cpython-39.pyc and /dev/null differ diff --git 
a/env/Lib/site-packages/pip/_internal/utils/__pycache__/unpacking.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/utils/__pycache__/unpacking.cpython-39.pyc deleted file mode 100644 index ca9359c8b90c0d2c0a3c74be2ba1572204c44d52..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/utils/__pycache__/unpacking.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/utils/__pycache__/urls.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/utils/__pycache__/urls.cpython-39.pyc deleted file mode 100644 index d14a48542989483ea7bfef2df4d02bf1cfb60b95..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/utils/__pycache__/urls.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/utils/__pycache__/virtualenv.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/utils/__pycache__/virtualenv.cpython-39.pyc deleted file mode 100644 index ece4546e838972f31df095ab232dbdca09b6d9e2..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/utils/__pycache__/virtualenv.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/utils/__pycache__/wheel.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/utils/__pycache__/wheel.cpython-39.pyc deleted file mode 100644 index 1eeb051528b73b8f799f6df72632f70577d7db4b..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/utils/__pycache__/wheel.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/utils/appdirs.py b/env/Lib/site-packages/pip/_internal/utils/appdirs.py index db974dad63573c0a751e3dd3dfe2a5dc96fa01bd..16933bf8afedcbe3e9d4fcc04e5f7246228c56fc 100644 --- a/env/Lib/site-packages/pip/_internal/utils/appdirs.py +++ b/env/Lib/site-packages/pip/_internal/utils/appdirs.py @@ -7,32 +7,46 @@ and eventually drop this after all usages are changed. """ import os +import sys from typing import List -from pip._vendor import appdirs as _appdirs +from pip._vendor import platformdirs as _appdirs -def user_cache_dir(appname): - # type: (str) -> str +def user_cache_dir(appname: str) -> str: return _appdirs.user_cache_dir(appname, appauthor=False) -def user_config_dir(appname, roaming=True): - # type: (str, bool) -> str - path = _appdirs.user_config_dir(appname, appauthor=False, roaming=roaming) - if _appdirs.system == "darwin" and not os.path.isdir(path): - path = os.path.expanduser("~/.config/") - if appname: - path = os.path.join(path, appname) - return path +def _macos_user_config_dir(appname: str, roaming: bool = True) -> str: + # Use ~/Application Support/pip, if the directory exists. + path = _appdirs.user_data_dir(appname, appauthor=False, roaming=roaming) + if os.path.isdir(path): + return path + + # Use a Linux-like ~/.config/pip, by default. 
+ linux_like_path = "~/.config/" + if appname: + linux_like_path = os.path.join(linux_like_path, appname) + + return os.path.expanduser(linux_like_path) + + +def user_config_dir(appname: str, roaming: bool = True) -> str: + if sys.platform == "darwin": + return _macos_user_config_dir(appname, roaming) + + return _appdirs.user_config_dir(appname, appauthor=False, roaming=roaming) # for the discussion regarding site_config_dir locations # see <https://github.com/pypa/pip/issues/1733> -def site_config_dirs(appname): - # type: (str) -> List[str] +def site_config_dirs(appname: str) -> List[str]: + if sys.platform == "darwin": + return [_appdirs.site_data_dir(appname, appauthor=False, multipath=True)] + dirval = _appdirs.site_config_dir(appname, appauthor=False, multipath=True) - if _appdirs.system not in ["win32", "darwin"]: - # always look in /etc directly as well - return dirval.split(os.pathsep) + ["/etc"] - return [dirval] + if sys.platform == "win32": + return [dirval] + + # Unix-y system. Look in /etc as well. + return dirval.split(os.pathsep) + ["/etc"] diff --git a/env/Lib/site-packages/pip/_internal/utils/compat.py b/env/Lib/site-packages/pip/_internal/utils/compat.py index 1fb2dc729e097244167b48ba582e792aa5d1f8d1..3f4d300cef077e698989245562375a9444d983fa 100644 --- a/env/Lib/site-packages/pip/_internal/utils/compat.py +++ b/env/Lib/site-packages/pip/_internal/utils/compat.py @@ -11,8 +11,7 @@ __all__ = ["get_path_uid", "stdlib_pkgs", "WINDOWS"] logger = logging.getLogger(__name__) -def has_tls(): - # type: () -> bool +def has_tls() -> bool: try: import _ssl # noqa: F401 # ignore unused @@ -25,8 +24,7 @@ def has_tls(): return IS_PYOPENSSL -def get_path_uid(path): - # type: (str) -> int +def get_path_uid(path: str) -> int: """ Return path's uid. diff --git a/env/Lib/site-packages/pip/_internal/utils/compatibility_tags.py b/env/Lib/site-packages/pip/_internal/utils/compatibility_tags.py index 14fe51c1a519300e966d1c7e9be292f95f59b209..b6ed9a78e552806cb23d8ac48ada6d41db5b4de5 100644 --- a/env/Lib/site-packages/pip/_internal/utils/compatibility_tags.py +++ b/env/Lib/site-packages/pip/_internal/utils/compatibility_tags.py @@ -2,9 +2,10 @@ """ import re -from typing import TYPE_CHECKING, List, Optional, Tuple +from typing import List, Optional, Tuple from pip._vendor.packaging.tags import ( + PythonVersion, Tag, compatible_tags, cpython_tags, @@ -14,21 +15,15 @@ from pip._vendor.packaging.tags import ( mac_platforms, ) -if TYPE_CHECKING: - from pip._vendor.packaging.tags import PythonVersion - - _osx_arch_pat = re.compile(r"(.+)_(\d+)_(\d+)_(.+)") -def version_info_to_nodot(version_info): - # type: (Tuple[int, ...]) -> str +def version_info_to_nodot(version_info: Tuple[int, ...]) -> str: # Only use up to the first two numbers. 
return "".join(map(str, version_info[:2])) -def _mac_platforms(arch): - # type: (str) -> List[str] +def _mac_platforms(arch: str) -> List[str]: match = _osx_arch_pat.match(arch) if match: name, major, minor, actual_arch = match.groups() @@ -48,8 +43,7 @@ def _mac_platforms(arch): return arches -def _custom_manylinux_platforms(arch): - # type: (str) -> List[str] +def _custom_manylinux_platforms(arch: str) -> List[str]: arches = [arch] arch_prefix, arch_sep, arch_suffix = arch.partition("_") if arch_prefix == "manylinux2014": @@ -70,8 +64,7 @@ def _custom_manylinux_platforms(arch): return arches -def _get_custom_platforms(arch): - # type: (str) -> List[str] +def _get_custom_platforms(arch: str) -> List[str]: arch_prefix, arch_sep, arch_suffix = arch.partition("_") if arch.startswith("macosx"): arches = _mac_platforms(arch) @@ -82,8 +75,7 @@ def _get_custom_platforms(arch): return arches -def _expand_allowed_platforms(platforms): - # type: (Optional[List[str]]) -> Optional[List[str]] +def _expand_allowed_platforms(platforms: Optional[List[str]]) -> Optional[List[str]]: if not platforms: return None @@ -100,16 +92,16 @@ def _expand_allowed_platforms(platforms): return result -def _get_python_version(version): - # type: (str) -> PythonVersion +def _get_python_version(version: str) -> PythonVersion: if len(version) > 1: return int(version[0]), int(version[1:]) else: return (int(version[0]),) -def _get_custom_interpreter(implementation=None, version=None): - # type: (Optional[str], Optional[str]) -> str +def _get_custom_interpreter( + implementation: Optional[str] = None, version: Optional[str] = None +) -> str: if implementation is None: implementation = interpreter_name() if version is None: @@ -118,12 +110,11 @@ def _get_custom_interpreter(implementation=None, version=None): def get_supported( - version=None, # type: Optional[str] - platforms=None, # type: Optional[List[str]] - impl=None, # type: Optional[str] - abis=None, # type: Optional[List[str]] -): - # type: (...) -> List[Tag] + version: Optional[str] = None, + platforms: Optional[List[str]] = None, + impl: Optional[str] = None, + abis: Optional[List[str]] = None, +) -> List[Tag]: """Return a list of supported tags for each version specified in `versions`. @@ -136,9 +127,9 @@ def get_supported( :param abis: specify a list of abis you want valid tags for, or None. If None, use the local interpreter abi. 
""" - supported = [] # type: List[Tag] + supported: List[Tag] = [] - python_version = None # type: Optional[PythonVersion] + python_version: Optional[PythonVersion] = None if version is not None: python_version = _get_python_version(version) diff --git a/env/Lib/site-packages/pip/_internal/utils/datetime.py b/env/Lib/site-packages/pip/_internal/utils/datetime.py index b638646c8bb22ac31b78f588cce4715709edd76f..8668b3b0ec1deec2aeb7ff6bd94265d6705e05bf 100644 --- a/env/Lib/site-packages/pip/_internal/utils/datetime.py +++ b/env/Lib/site-packages/pip/_internal/utils/datetime.py @@ -4,8 +4,7 @@ import datetime -def today_is_later_than(year, month, day): - # type: (int, int, int) -> bool +def today_is_later_than(year: int, month: int, day: int) -> bool: today = datetime.date.today() given = datetime.date(year, month, day) diff --git a/env/Lib/site-packages/pip/_internal/utils/deprecation.py b/env/Lib/site-packages/pip/_internal/utils/deprecation.py index b62b3fb65098d5d42473035873d52e10fa5209df..72bd6f25a554b303d0bf5028145cf3a5c71b3e06 100644 --- a/env/Lib/site-packages/pip/_internal/utils/deprecation.py +++ b/env/Lib/site-packages/pip/_internal/utils/deprecation.py @@ -8,7 +8,7 @@ from typing import Any, Optional, TextIO, Type, Union from pip._vendor.packaging.version import parse -from pip import __version__ as current_version +from pip import __version__ as current_version # NOTE: tests patch this name. DEPRECATION_MSG_PREFIX = "DEPRECATION: " @@ -17,19 +17,18 @@ class PipDeprecationWarning(Warning): pass -_original_showwarning = None # type: Any +_original_showwarning: Any = None # Warnings <-> Logging Integration def _showwarning( - message, # type: Union[Warning, str] - category, # type: Type[Warning] - filename, # type: str - lineno, # type: int - file=None, # type: Optional[TextIO] - line=None, # type: Optional[str] -): - # type: (...) -> None + message: Union[Warning, str], + category: Type[Warning], + filename: str, + lineno: int, + file: Optional[TextIO] = None, + line: Optional[str] = None, +) -> None: if file is not None: if _original_showwarning is not None: _original_showwarning(message, category, filename, lineno, file, line) @@ -42,8 +41,7 @@ def _showwarning( _original_showwarning(message, category, filename, lineno, file, line) -def install_warning_logger(): - # type: () -> None +def install_warning_logger() -> None: # Enable our Deprecation Warnings warnings.simplefilter("default", PipDeprecationWarning, append=True) @@ -54,49 +52,69 @@ def install_warning_logger(): warnings.showwarning = _showwarning -def deprecated(reason, replacement, gone_in, issue=None): - # type: (str, Optional[str], Optional[str], Optional[int]) -> None +def deprecated( + *, + reason: str, + replacement: Optional[str], + gone_in: Optional[str], + feature_flag: Optional[str] = None, + issue: Optional[int] = None, +) -> None: """Helper to deprecate existing functionality. reason: Textual reason shown to the user about why this functionality has - been deprecated. + been deprecated. Should be a complete sentence. replacement: Textual suggestion shown to the user about what alternative functionality they can use. gone_in: The version of pip does this functionality should get removed in. - Raises errors if pip's current version is greater than or equal to + Raises an error if pip's current version is greater than or equal to this. + feature_flag: + Command-line flag of the form --use-feature={feature_flag} for testing + upcoming functionality. 
issue: Issue number on the tracker that would serve as a useful place for users to find related discussion and provide feedback. - - Always pass replacement, gone_in and issue as keyword arguments for clarity - at the call site. """ - # Construct a nice message. - # This is eagerly formatted as we want it to get logged as if someone - # typed this entire message out. - sentences = [ - (reason, DEPRECATION_MSG_PREFIX + "{}"), - (gone_in, "pip {} will remove support for this functionality."), - (replacement, "A possible replacement is {}."), + # Determine whether or not the feature is already gone in this version. + is_gone = gone_in is not None and parse(current_version) >= parse(gone_in) + + message_parts = [ + (reason, f"{DEPRECATION_MSG_PREFIX}{{}}"), + ( + gone_in, + "pip {} will enforce this behaviour change." + if not is_gone + else "Since pip {}, this is no longer supported.", + ), + ( + replacement, + "A possible replacement is {}.", + ), + ( + feature_flag, + "You can use the flag --use-feature={} to test the upcoming behaviour." + if not is_gone + else None, + ), ( issue, - ( - "You can find discussion regarding this at " - "https://github.com/pypa/pip/issues/{}." - ), + "Discussion can be found at https://github.com/pypa/pip/issues/{}", ), ] + message = " ".join( - template.format(val) for val, template in sentences if val is not None + format_str.format(value) + for value, format_str in message_parts + if format_str is not None and value is not None ) - # Raise as an error if it has to be removed. - if gone_in is not None and parse(current_version) >= parse(gone_in): + # Raise as an error if this behaviour is deprecated. + if is_gone: raise PipDeprecationWarning(message) warnings.warn(message, category=PipDeprecationWarning, stacklevel=2) diff --git a/env/Lib/site-packages/pip/_internal/utils/direct_url_helpers.py b/env/Lib/site-packages/pip/_internal/utils/direct_url_helpers.py index eb50ac42be82806e6a64113531658849c8303a93..0e8e5e1608b911e789a3d346ebe48aa7cc54b79e 100644 --- a/env/Lib/site-packages/pip/_internal/utils/direct_url_helpers.py +++ b/env/Lib/site-packages/pip/_internal/utils/direct_url_helpers.py @@ -1,25 +1,12 @@ -import json -import logging from typing import Optional -from pip._vendor.pkg_resources import Distribution - -from pip._internal.models.direct_url import ( - DIRECT_URL_METADATA_NAME, - ArchiveInfo, - DirectUrl, - DirectUrlValidationError, - DirInfo, - VcsInfo, -) +from pip._internal.models.direct_url import ArchiveInfo, DirectUrl, DirInfo, VcsInfo from pip._internal.models.link import Link +from pip._internal.utils.urls import path_to_url from pip._internal.vcs import vcs -logger = logging.getLogger(__name__) - -def direct_url_as_pep440_direct_reference(direct_url, name): - # type: (DirectUrl, str) -> str +def direct_url_as_pep440_direct_reference(direct_url: DirectUrl, name: str) -> str: """Convert a DirectUrl to a pip requirement string.""" direct_url.validate() # if invalid, this is a pip bug requirement = name + " @ " @@ -42,8 +29,16 @@ def direct_url_as_pep440_direct_reference(direct_url, name): return requirement -def direct_url_from_link(link, source_dir=None, link_is_in_wheel_cache=False): - # type: (Link, Optional[str], bool) -> DirectUrl +def direct_url_for_editable(source_dir: str) -> DirectUrl: + return DirectUrl( + url=path_to_url(source_dir), + info=DirInfo(editable=True), + ) + + +def direct_url_from_link( + link: Link, source_dir: Optional[str] = None, link_is_in_wheel_cache: bool = False +) -> DirectUrl: if link.is_vcs: vcs_backend = 
vcs.get_backend_for_scheme(link.scheme) assert vcs_backend @@ -90,28 +85,3 @@ def direct_url_from_link(link, source_dir=None, link_is_in_wheel_cache=False): info=ArchiveInfo(hash=hash), subdirectory=link.subdirectory_fragment, ) - - -def dist_get_direct_url(dist): - # type: (Distribution) -> Optional[DirectUrl] - """Obtain a DirectUrl from a pkg_resource.Distribution. - - Returns None if the distribution has no `direct_url.json` metadata, - or if `direct_url.json` is invalid. - """ - if not dist.has_metadata(DIRECT_URL_METADATA_NAME): - return None - try: - return DirectUrl.from_json(dist.get_metadata(DIRECT_URL_METADATA_NAME)) - except ( - DirectUrlValidationError, - json.JSONDecodeError, - UnicodeDecodeError, - ) as e: - logger.warning( - "Error parsing %s for %s: %s", - DIRECT_URL_METADATA_NAME, - dist.project_name, - e, - ) - return None diff --git a/env/Lib/site-packages/pip/_internal/utils/distutils_args.py b/env/Lib/site-packages/pip/_internal/utils/distutils_args.py deleted file mode 100644 index e886c8884d0368ac7ae168a0ce44803b50ed8e2c..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_internal/utils/distutils_args.py +++ /dev/null @@ -1,43 +0,0 @@ -from distutils.errors import DistutilsArgError -from distutils.fancy_getopt import FancyGetopt -from typing import Dict, List - -_options = [ - ("exec-prefix=", None, ""), - ("home=", None, ""), - ("install-base=", None, ""), - ("install-data=", None, ""), - ("install-headers=", None, ""), - ("install-lib=", None, ""), - ("install-platlib=", None, ""), - ("install-purelib=", None, ""), - ("install-scripts=", None, ""), - ("prefix=", None, ""), - ("root=", None, ""), - ("user", None, ""), -] - - -# typeshed doesn't permit Tuple[str, None, str], see python/typeshed#3469. -_distutils_getopt = FancyGetopt(_options) # type: ignore - - -def parse_distutils_args(args): - # type: (List[str]) -> Dict[str, str] - """Parse provided arguments, returning an object that has the - matched arguments. - - Any unknown arguments are ignored. - """ - result = {} - for arg in args: - try: - _, match = _distutils_getopt.getopt(args=[arg]) - except DistutilsArgError: - # We don't care about any other options, which here may be - # considered unrecognized since our option list is not - # exhaustive. 
- pass - else: - result.update(match.__dict__) - return result diff --git a/env/Lib/site-packages/pip/_internal/utils/encoding.py b/env/Lib/site-packages/pip/_internal/utils/encoding.py index 7c8893d559e41fb1818cd3018a549c6694780033..008f06a79bf598b149bdccb73e572d13331a1631 100644 --- a/env/Lib/site-packages/pip/_internal/utils/encoding.py +++ b/env/Lib/site-packages/pip/_internal/utils/encoding.py @@ -4,7 +4,7 @@ import re import sys from typing import List, Tuple -BOMS = [ +BOMS: List[Tuple[bytes, str]] = [ (codecs.BOM_UTF8, "utf-8"), (codecs.BOM_UTF16, "utf-16"), (codecs.BOM_UTF16_BE, "utf-16-be"), @@ -12,13 +12,12 @@ BOMS = [ (codecs.BOM_UTF32, "utf-32"), (codecs.BOM_UTF32_BE, "utf-32-be"), (codecs.BOM_UTF32_LE, "utf-32-le"), -] # type: List[Tuple[bytes, str]] +] -ENCODING_RE = re.compile(br"coding[:=]\s*([-\w.]+)") +ENCODING_RE = re.compile(rb"coding[:=]\s*([-\w.]+)") -def auto_decode(data): - # type: (bytes) -> str +def auto_decode(data: bytes) -> str: """Check a bytes string for a BOM to correctly detect the encoding Fallback to locale.getpreferredencoding(False) like open() on Python3""" diff --git a/env/Lib/site-packages/pip/_internal/utils/entrypoints.py b/env/Lib/site-packages/pip/_internal/utils/entrypoints.py index 879bf21ac5f6f246d5d9fae5429b32b297bc717a..150136938548af6aa5ae1f716b330d0eb2d3e013 100644 --- a/env/Lib/site-packages/pip/_internal/utils/entrypoints.py +++ b/env/Lib/site-packages/pip/_internal/utils/entrypoints.py @@ -1,11 +1,26 @@ +import itertools +import os +import shutil import sys from typing import List, Optional from pip._internal.cli.main import main +from pip._internal.utils.compat import WINDOWS +_EXECUTABLE_NAMES = [ + "pip", + f"pip{sys.version_info.major}", + f"pip{sys.version_info.major}.{sys.version_info.minor}", +] +if WINDOWS: + _allowed_extensions = {"", ".exe"} + _EXECUTABLE_NAMES = [ + "".join(parts) + for parts in itertools.product(_EXECUTABLE_NAMES, _allowed_extensions) + ] -def _wrapper(args=None): - # type: (Optional[List[str]]) -> int + +def _wrapper(args: Optional[List[str]] = None) -> int: """Central wrapper for all old entrypoints. Historically pip has had several entrypoints defined. Because of issues @@ -26,3 +41,44 @@ def _wrapper(args=None): "running pip directly.\n" ) return main(args) + + +def get_best_invocation_for_this_pip() -> str: + """Try to figure out the best way to invoke pip in the current environment.""" + binary_directory = "Scripts" if WINDOWS else "bin" + binary_prefix = os.path.join(sys.prefix, binary_directory) + + # Try to use pip[X[.Y]] names, if those executables for this environment are + # the first on PATH with that name. + path_parts = os.path.normcase(os.environ.get("PATH", "")).split(os.pathsep) + exe_are_in_PATH = os.path.normcase(binary_prefix) in path_parts + if exe_are_in_PATH: + for exe_name in _EXECUTABLE_NAMES: + found_executable = shutil.which(exe_name) + binary_executable = os.path.join(binary_prefix, exe_name) + if ( + found_executable + and os.path.exists(binary_executable) + and os.path.samefile( + found_executable, + binary_executable, + ) + ): + return exe_name + + # Use the `-m` invocation, if there's no "nice" invocation. + return f"{get_best_invocation_for_this_python()} -m pip" + + +def get_best_invocation_for_this_python() -> str: + """Try to figure out the best way to invoke the current Python.""" + exe = sys.executable + exe_name = os.path.basename(exe) + + # Try to use the basename, if it's the first executable. 
+ found_executable = shutil.which(exe_name) + if found_executable and os.path.samefile(found_executable, exe): + return exe_name + + # Use the full executable name, because we couldn't find something simpler. + return exe diff --git a/env/Lib/site-packages/pip/_internal/utils/filesystem.py b/env/Lib/site-packages/pip/_internal/utils/filesystem.py index 3db97dc413b64bdff017a304e82a07b219157ec7..83c2df75b963e5866b63aaf0f4446a8ca61aebce 100644 --- a/env/Lib/site-packages/pip/_internal/utils/filesystem.py +++ b/env/Lib/site-packages/pip/_internal/utils/filesystem.py @@ -2,12 +2,10 @@ import fnmatch import os import os.path import random -import shutil -import stat import sys from contextlib import contextmanager from tempfile import NamedTemporaryFile -from typing import Any, BinaryIO, Iterator, List, Union, cast +from typing import Any, BinaryIO, Generator, List, Union, cast from pip._vendor.tenacity import retry, stop_after_delay, wait_fixed @@ -15,8 +13,7 @@ from pip._internal.utils.compat import get_path_uid from pip._internal.utils.misc import format_size -def check_path_owner(path): - # type: (str) -> bool +def check_path_owner(path: str) -> bool: # If we don't have a way to check the effective uid of this process, then # we'll just assume that we own the directory. if sys.platform == "win32" or not hasattr(os, "geteuid"): @@ -43,38 +40,8 @@ def check_path_owner(path): return False # assume we don't own the path -def copy2_fixed(src, dest): - # type: (str, str) -> None - """Wrap shutil.copy2() but map errors copying socket files to - SpecialFileError as expected. - - See also https://bugs.python.org/issue37700. - """ - try: - shutil.copy2(src, dest) - except OSError: - for f in [src, dest]: - try: - is_socket_file = is_socket(f) - except OSError: - # An error has already occurred. Another error here is not - # a problem and we can ignore it. - pass - else: - if is_socket_file: - raise shutil.SpecialFileError(f"`{f}` is a socket") - - raise - - -def is_socket(path): - # type: (str) -> bool - return stat.S_ISSOCK(os.lstat(path).st_mode) - - @contextmanager -def adjacent_tmp_file(path, **kwargs): - # type: (str, **Any) -> Iterator[BinaryIO] +def adjacent_tmp_file(path: str, **kwargs: Any) -> Generator[BinaryIO, None, None]: """Return a file-like object pointing to a tmp file next to path. The file is created securely and is ensured to be written to disk @@ -98,7 +65,7 @@ def adjacent_tmp_file(path, **kwargs): os.fsync(result.fileno()) -# Tenacity raises RetryError by default, explictly raise the original exception +# Tenacity raises RetryError by default, explicitly raise the original exception _replace_retry = retry(reraise=True, stop=stop_after_delay(1), wait=wait_fixed(0.25)) replace = _replace_retry(os.replace) @@ -106,8 +73,7 @@ replace = _replace_retry(os.replace) # test_writable_dir and _test_writable_dir_win are copied from Flit, # with the author's agreement to also place them under pip's license. -def test_writable_dir(path): - # type: (str) -> bool +def test_writable_dir(path: str) -> bool: """Check if a directory is writable. Uses os.access() on POSIX, tries creating files on Windows. 
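# ---- illustrative sketch (editor's aside, not part of the diff) ----
# A minimal sketch of the write-next-to-the-target-then-swap pattern that
# adjacent_tmp_file() and the tenacity-retried os.replace() in the hunks
# above implement: write into a NamedTemporaryFile created in the same
# directory as the destination, fsync so the bytes reach disk, then swap
# it into place atomically. The helper name save_atomically is
# hypothetical, chosen here only for illustration.
import os
from tempfile import NamedTemporaryFile


def save_atomically(path: str, data: bytes) -> None:
    # Create the temp file in the target's directory so os.replace()
    # stays on one filesystem and remains an atomic rename.
    with NamedTemporaryFile(
        delete=False, dir=os.path.dirname(path) or ".", suffix=".tmp"
    ) as tmp:
        tmp.write(data)
        tmp.flush()
        os.fsync(tmp.fileno())  # ensure the data is written to disk
    os.replace(tmp.name, path)  # atomic on POSIX and modern Windows


# Usage (hypothetical cache file): save_atomically("selfcheck.json", b"{}")
# ---------------------------------------------------------------------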
@@ -125,8 +91,7 @@ def test_writable_dir(path): return _test_writable_dir_win(path) -def _test_writable_dir_win(path): - # type: (str) -> bool +def _test_writable_dir_win(path: str) -> bool: # os.access doesn't work on Windows: http://bugs.python.org/issue2528 # and we can't use tempfile: http://bugs.python.org/issue22107 basename = "accesstest_deleteme_fishfingers_custard_" @@ -154,32 +119,28 @@ def _test_writable_dir_win(path): raise OSError("Unexpected condition testing for writable directory") -def find_files(path, pattern): - # type: (str, str) -> List[str] +def find_files(path: str, pattern: str) -> List[str]: """Returns a list of absolute paths of files beneath path, recursively, with filenames which match the UNIX-style shell glob pattern.""" - result = [] # type: List[str] + result: List[str] = [] for root, _, files in os.walk(path): matches = fnmatch.filter(files, pattern) result.extend(os.path.join(root, f) for f in matches) return result -def file_size(path): - # type: (str) -> Union[int, float] +def file_size(path: str) -> Union[int, float]: # If it's a symlink, return 0. if os.path.islink(path): return 0 return os.path.getsize(path) -def format_file_size(path): - # type: (str) -> str +def format_file_size(path: str) -> str: return format_size(file_size(path)) -def directory_size(path): - # type: (str) -> Union[int, float] +def directory_size(path: str) -> Union[int, float]: size = 0.0 for root, _dirs, files in os.walk(path): for filename in files: @@ -188,6 +149,5 @@ def directory_size(path): return size -def format_directory_size(path): - # type: (str) -> str +def format_directory_size(path: str) -> str: return format_size(directory_size(path)) diff --git a/env/Lib/site-packages/pip/_internal/utils/filetypes.py b/env/Lib/site-packages/pip/_internal/utils/filetypes.py index da935846f6187a39df04f24a475cfefdb1447a8b..5948570178f3e6e79d1ff574241d09d4d8ed78de 100644 --- a/env/Lib/site-packages/pip/_internal/utils/filetypes.py +++ b/env/Lib/site-packages/pip/_internal/utils/filetypes.py @@ -6,21 +6,20 @@ from typing import Tuple from pip._internal.utils.misc import splitext WHEEL_EXTENSION = ".whl" -BZ2_EXTENSIONS = (".tar.bz2", ".tbz") # type: Tuple[str, ...] -XZ_EXTENSIONS = ( +BZ2_EXTENSIONS: Tuple[str, ...] = (".tar.bz2", ".tbz") +XZ_EXTENSIONS: Tuple[str, ...] = ( ".tar.xz", ".txz", ".tlz", ".tar.lz", ".tar.lzma", -) # type: Tuple[str, ...] -ZIP_EXTENSIONS = (".zip", WHEEL_EXTENSION) # type: Tuple[str, ...] -TAR_EXTENSIONS = (".tar.gz", ".tgz", ".tar") # type: Tuple[str, ...] +) +ZIP_EXTENSIONS: Tuple[str, ...] = (".zip", WHEEL_EXTENSION) +TAR_EXTENSIONS: Tuple[str, ...] = (".tar.gz", ".tgz", ".tar") ARCHIVE_EXTENSIONS = ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS + XZ_EXTENSIONS -def is_archive_file(name): - # type: (str) -> bool +def is_archive_file(name: str) -> bool: """Return True if `name` is a considered as an archive file.""" ext = splitext(name)[1].lower() if ext in ARCHIVE_EXTENSIONS: diff --git a/env/Lib/site-packages/pip/_internal/utils/glibc.py b/env/Lib/site-packages/pip/_internal/utils/glibc.py index 1c9ff35446d864fc8dd06b02754b9277b7f65705..81342afa447746dbb8f060da2d454c0175f12e30 100644 --- a/env/Lib/site-packages/pip/_internal/utils/glibc.py +++ b/env/Lib/site-packages/pip/_internal/utils/glibc.py @@ -1,19 +1,14 @@ -# The following comment should be removed at some point in the future. 
-# mypy: strict-optional=False - import os import sys from typing import Optional, Tuple -def glibc_version_string(): - # type: () -> Optional[str] +def glibc_version_string() -> Optional[str]: "Returns glibc version string, or None if not using glibc." return glibc_version_string_confstr() or glibc_version_string_ctypes() -def glibc_version_string_confstr(): - # type: () -> Optional[str] +def glibc_version_string_confstr() -> Optional[str]: "Primary implementation of glibc_version_string using os.confstr." # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely # to be broken or missing. This strategy is used in the standard library @@ -22,16 +17,18 @@ def glibc_version_string_confstr(): if sys.platform == "win32": return None try: + gnu_libc_version = os.confstr("CS_GNU_LIBC_VERSION") + if gnu_libc_version is None: + return None # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17": - _, version = os.confstr("CS_GNU_LIBC_VERSION").split() + _, version = gnu_libc_version.split() except (AttributeError, OSError, ValueError): # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)... return None return version -def glibc_version_string_ctypes(): - # type: () -> Optional[str] +def glibc_version_string_ctypes() -> Optional[str]: "Fallback implementation of glibc_version_string using ctypes." try: @@ -78,8 +75,7 @@ def glibc_version_string_ctypes(): # versions that was generated by pip 8.1.2 and earlier is useless and # misleading. Solution: instead of using platform, use our code that actually # works. -def libc_ver(): - # type: () -> Tuple[str, str] +def libc_ver() -> Tuple[str, str]: """Try to determine the glibc version Returns a tuple of strings (lib, version) which default to empty strings diff --git a/env/Lib/site-packages/pip/_internal/utils/hashes.py b/env/Lib/site-packages/pip/_internal/utils/hashes.py index 3d20b8d02cded0aca0a8b4127b6a746e837dcb8b..843cffc6b3ddd6eb01483bcf1b5c33c717e027b6 100644 --- a/env/Lib/site-packages/pip/_internal/utils/hashes.py +++ b/env/Lib/site-packages/pip/_internal/utils/hashes.py @@ -1,5 +1,5 @@ import hashlib -from typing import TYPE_CHECKING, BinaryIO, Dict, Iterator, List +from typing import TYPE_CHECKING, BinaryIO, Dict, Iterable, List, Optional from pip._internal.exceptions import HashMismatch, HashMissing, InstallationError from pip._internal.utils.misc import read_chunks @@ -28,8 +28,7 @@ class Hashes: """ - def __init__(self, hashes=None): - # type: (Dict[str, List[str]]) -> None + def __init__(self, hashes: Optional[Dict[str, List[str]]] = None) -> None: """ :param hashes: A dict of algorithm names pointing to lists of allowed hex digests @@ -41,8 +40,7 @@ class Hashes: allowed[alg] = sorted(keys) self._allowed = allowed - def __and__(self, other): - # type: (Hashes) -> Hashes + def __and__(self, other: "Hashes") -> "Hashes": if not isinstance(other, Hashes): return NotImplemented @@ -62,21 +60,14 @@ class Hashes: return Hashes(new) @property - def digest_count(self): - # type: () -> int + def digest_count(self) -> int: return sum(len(digests) for digests in self._allowed.values()) - def is_hash_allowed( - self, - hash_name, # type: str - hex_digest, # type: str - ): - # type: (...) 
-> bool + def is_hash_allowed(self, hash_name: str, hex_digest: str) -> bool: """Return whether the given hex digest is allowed.""" return hex_digest in self._allowed.get(hash_name, []) - def check_against_chunks(self, chunks): - # type: (Iterator[bytes]) -> None + def check_against_chunks(self, chunks: Iterable[bytes]) -> None: """Check good hashes against ones built from iterable of chunks of data. @@ -99,12 +90,10 @@ class Hashes: return self._raise(gots) - def _raise(self, gots): - # type: (Dict[str, _Hash]) -> NoReturn + def _raise(self, gots: Dict[str, "_Hash"]) -> "NoReturn": raise HashMismatch(self._allowed, gots) - def check_against_file(self, file): - # type: (BinaryIO) -> None + def check_against_file(self, file: BinaryIO) -> None: """Check good hashes against a file-like object Raise HashMismatch if none match. @@ -112,28 +101,27 @@ class Hashes: """ return self.check_against_chunks(read_chunks(file)) - def check_against_path(self, path): - # type: (str) -> None + def check_against_path(self, path: str) -> None: with open(path, "rb") as file: return self.check_against_file(file) - def __nonzero__(self): - # type: () -> bool + def has_one_of(self, hashes: Dict[str, str]) -> bool: + """Return whether any of the given hashes are allowed.""" + for hash_name, hex_digest in hashes.items(): + if self.is_hash_allowed(hash_name, hex_digest): + return True + return False + + def __bool__(self) -> bool: """Return whether I know any known-good hashes.""" return bool(self._allowed) - def __bool__(self): - # type: () -> bool - return self.__nonzero__() - - def __eq__(self, other): - # type: (object) -> bool + def __eq__(self, other: object) -> bool: if not isinstance(other, Hashes): return NotImplemented return self._allowed == other._allowed - def __hash__(self): - # type: () -> int + def __hash__(self) -> int: return hash( ",".join( sorted( @@ -153,13 +141,11 @@ class MissingHashes(Hashes): """ - def __init__(self): - # type: () -> None + def __init__(self) -> None: """Don't offer the ``hashes`` kwarg.""" # Pass our favorite hash in to generate a "gotten hash". With the # empty list, it will never match, so an error will always raise. super().__init__(hashes={FAVORITE_HASH: []}) - def _raise(self, gots): - # type: (Dict[str, _Hash]) -> NoReturn + def _raise(self, gots: Dict[str, "_Hash"]) -> "NoReturn": raise HashMissing(gots[FAVORITE_HASH].hexdigest()) diff --git a/env/Lib/site-packages/pip/_internal/utils/inject_securetransport.py b/env/Lib/site-packages/pip/_internal/utils/inject_securetransport.py index b6863d934050634930dae8a634ada52e547c29bf..276aa79bb81356cdca73af0a5851b448707784a4 100644 --- a/env/Lib/site-packages/pip/_internal/utils/inject_securetransport.py +++ b/env/Lib/site-packages/pip/_internal/utils/inject_securetransport.py @@ -10,8 +10,7 @@ old to handle TLSv1.2. 
import sys -def inject_securetransport(): - # type: () -> None +def inject_securetransport() -> None: # Only relevant on macOS if sys.platform != "darwin": return diff --git a/env/Lib/site-packages/pip/_internal/utils/logging.py b/env/Lib/site-packages/pip/_internal/utils/logging.py index 45798d54f7b3b6f8256ccb51110215ab92ca924b..c10e1f4ced6bcc799799b62666695998e095bbaf 100644 --- a/env/Lib/site-packages/pip/_internal/utils/logging.py +++ b/env/Lib/site-packages/pip/_internal/utils/logging.py @@ -4,27 +4,30 @@ import logging import logging.handlers import os import sys -from logging import Filter, getLogger -from typing import IO, Any, Callable, Iterator, Optional, TextIO, Type, cast - +import threading +from dataclasses import dataclass +from io import TextIOWrapper +from logging import Filter +from typing import Any, ClassVar, Generator, List, Optional, TextIO, Type + +from pip._vendor.rich.console import ( + Console, + ConsoleOptions, + ConsoleRenderable, + RenderableType, + RenderResult, + RichCast, +) +from pip._vendor.rich.highlighter import NullHighlighter +from pip._vendor.rich.logging import RichHandler +from pip._vendor.rich.segment import Segment +from pip._vendor.rich.style import Style + +from pip._internal.utils._log import VERBOSE, getLogger from pip._internal.utils.compat import WINDOWS from pip._internal.utils.deprecation import DEPRECATION_MSG_PREFIX from pip._internal.utils.misc import ensure_dir -try: - import threading -except ImportError: - import dummy_threading as threading # type: ignore - - -try: - from pip._vendor import colorama -# Lots of different errors can come from this, including SystemError and -# ImportError. -except Exception: - colorama = None - - _log_state = threading.local() subprocess_logger = getLogger("pip.subprocessor") @@ -34,39 +37,22 @@ class BrokenStdoutLoggingError(Exception): Raised if BrokenPipeError occurs for the stdout stream while logging. """ - pass +def _is_broken_pipe_error(exc_class: Type[BaseException], exc: BaseException) -> bool: + if exc_class is BrokenPipeError: + return True -# BrokenPipeError manifests differently in Windows and non-Windows. -if WINDOWS: - # In Windows, a broken pipe can show up as EINVAL rather than EPIPE: + # On Windows, a broken pipe can show up as EINVAL rather than EPIPE: # https://bugs.python.org/issue19612 # https://bugs.python.org/issue30418 - def _is_broken_pipe_error(exc_class, exc): - # type: (Type[BaseException], BaseException) -> bool - """See the docstring for non-Windows below.""" - return (exc_class is BrokenPipeError) or ( - isinstance(exc, OSError) and exc.errno in (errno.EINVAL, errno.EPIPE) - ) - - -else: - # Then we are in the non-Windows case. - def _is_broken_pipe_error(exc_class, exc): - # type: (Type[BaseException], BaseException) -> bool - """ - Return whether an exception is a broken pipe error. + if not WINDOWS: + return False - Args: - exc_class: an exception class. - exc: an exception instance. - """ - return exc_class is BrokenPipeError + return isinstance(exc, OSError) and exc.errno in (errno.EINVAL, errno.EPIPE) @contextlib.contextmanager -def indent_log(num=2): - # type: (int) -> Iterator[None] +def indent_log(num: int = 2) -> Generator[None, None, None]: """ A context manager which will cause the log output to be indented for any log messages emitted inside it. 
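# ---- illustrative sketch (editor's aside, not part of the diff) ----
# The indent_log()/get_indentation() pair in the hunk above keeps a
# per-thread indentation counter that the formatter consults, so nested
# operations produce visually nested log output. A simplified,
# self-contained stand-in (not pip's actual implementation):
import contextlib
import threading
from typing import Generator

_state = threading.local()


def current_indent() -> int:
    # Default to zero for threads that never entered the context manager.
    return getattr(_state, "indent", 0)


@contextlib.contextmanager
def indented(num: int = 2) -> Generator[None, None, None]:
    _state.indent = current_indent() + num
    try:
        yield
    finally:
        _state.indent -= num  # always restore, even on error


# with indented():
#     print(" " * current_indent() + "nested message")
# ---------------------------------------------------------------------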
@@ -80,8 +66,7 @@ def indent_log(num=2): _log_state.indentation -= num -def get_indentation(): - # type: () -> int +def get_indentation() -> int: return getattr(_log_state, "indentation", 0) @@ -90,11 +75,10 @@ class IndentingFormatter(logging.Formatter): def __init__( self, - *args, # type: Any - add_timestamp=False, # type: bool - **kwargs, # type: Any - ): - # type: (...) -> None + *args: Any, + add_timestamp: bool = False, + **kwargs: Any, + ) -> None: """ A logging.Formatter that obeys the indent_log() context manager. @@ -104,8 +88,7 @@ class IndentingFormatter(logging.Formatter): self.add_timestamp = add_timestamp super().__init__(*args, **kwargs) - def get_message_start(self, formatted, levelno): - # type: (str, int) -> str + def get_message_start(self, formatted: str, levelno: int) -> str: """ Return the start of the formatted log message (not counting the prefix to add to each line). @@ -121,8 +104,7 @@ class IndentingFormatter(logging.Formatter): return "ERROR: " - def format(self, record): - # type: (logging.LogRecord) -> str + def format(self, record: logging.LogRecord) -> str: """ Calls the standard formatter, but will indent all of the log message lines by our current indentation level. @@ -139,85 +121,66 @@ class IndentingFormatter(logging.Formatter): return formatted -def _color_wrap(*colors): - # type: (*str) -> Callable[[str], str] - def wrapped(inp): - # type: (str) -> str - return "".join(list(colors) + [inp, colorama.Style.RESET_ALL]) - - return wrapped - - -class ColorizedStreamHandler(logging.StreamHandler): - - # Don't build up a list of colors if we don't have colorama - if colorama: - COLORS = [ - # This needs to be in order from highest logging level to lowest. - (logging.ERROR, _color_wrap(colorama.Fore.RED)), - (logging.WARNING, _color_wrap(colorama.Fore.YELLOW)), - ] - else: - COLORS = [] - - def __init__(self, stream=None, no_color=None): - # type: (Optional[TextIO], bool) -> None - super().__init__(stream) - self._no_color = no_color - - if WINDOWS and colorama: - self.stream = colorama.AnsiToWin32(self.stream) - - def _using_stdout(self): - # type: () -> bool - """ - Return whether the handler is using sys.stdout. - """ - if WINDOWS and colorama: - # Then self.stream is an AnsiToWin32 object. 
- stream = cast(colorama.AnsiToWin32, self.stream) - return stream.wrapped is sys.stdout - - return self.stream is sys.stdout - - def should_color(self): - # type: () -> bool - # Don't colorize things if we do not have colorama or if told not to - if not colorama or self._no_color: - return False - - real_stream = ( - self.stream - if not isinstance(self.stream, colorama.AnsiToWin32) - else self.stream.wrapped +@dataclass +class IndentedRenderable: + renderable: RenderableType + indent: int + + def __rich_console__( + self, console: Console, options: ConsoleOptions + ) -> RenderResult: + segments = console.render(self.renderable, options) + lines = Segment.split_lines(segments) + for line in lines: + yield Segment(" " * self.indent) + yield from line + yield Segment("\n") + + +class RichPipStreamHandler(RichHandler): + KEYWORDS: ClassVar[Optional[List[str]]] = [] + + def __init__(self, stream: Optional[TextIO], no_color: bool) -> None: + super().__init__( + console=Console(file=stream, no_color=no_color, soft_wrap=True), + show_time=False, + show_level=False, + show_path=False, + highlighter=NullHighlighter(), ) - # If the stream is a tty we should color it - if hasattr(real_stream, "isatty") and real_stream.isatty(): - return True - - # If we have an ANSI term we should color it - if os.environ.get("TERM") == "ANSI": - return True + # Our custom override on Rich's logger, to make things work as we need them to. + def emit(self, record: logging.LogRecord) -> None: + style: Optional[Style] = None + + # If we are given a diagnostic error to present, present it with indentation. + assert isinstance(record.args, tuple) + if record.msg == "[present-rich] %s" and len(record.args) == 1: + rich_renderable = record.args[0] + assert isinstance( + rich_renderable, (ConsoleRenderable, RichCast, str) + ), f"{rich_renderable} is not rich-console-renderable" + + renderable: RenderableType = IndentedRenderable( + rich_renderable, indent=get_indentation() + ) + else: + message = self.format(record) + renderable = self.render_message(record, message) + if record.levelno is not None: + if record.levelno >= logging.ERROR: + style = Style(color="red") + elif record.levelno >= logging.WARNING: + style = Style(color="yellow") + + try: + self.console.print(renderable, overflow="ignore", crop=False, style=style) + except Exception: + self.handleError(record) + + def handleError(self, record: logging.LogRecord) -> None: + """Called when logging is unable to log some output.""" - # If anything else we should not color it - return False - - def format(self, record): - # type: (logging.LogRecord) -> str - msg = super().format(record) - - if self.should_color(): - for level, color in self.COLORS: - if record.levelno >= level: - msg = color(msg) - break - - return msg - - # The logging module says handleError() can be customized. 
- def handleError(self, record): - # type: (logging.LogRecord) -> None exc_class, exc = sys.exc_info()[:2] # If a broken pipe occurred while calling write() or flush() on the # stdout stream in logging's Handler.emit(), then raise our special @@ -226,7 +189,7 @@ class ColorizedStreamHandler(logging.StreamHandler): if ( exc_class and exc - and self._using_stdout() + and self.console.file is sys.stdout and _is_broken_pipe_error(exc_class, exc) ): raise BrokenStdoutLoggingError() @@ -235,19 +198,16 @@ class ColorizedStreamHandler(logging.StreamHandler): class BetterRotatingFileHandler(logging.handlers.RotatingFileHandler): - def _open(self): - # type: () -> IO[Any] + def _open(self) -> TextIOWrapper: ensure_dir(os.path.dirname(self.baseFilename)) return super()._open() class MaxLevelFilter(Filter): - def __init__(self, level): - # type: (int) -> None + def __init__(self, level: int) -> None: self.level = level - def filter(self, record): - # type: (logging.LogRecord) -> bool + def filter(self, record: logging.LogRecord) -> bool: return record.levelno < self.level @@ -257,33 +217,33 @@ class ExcludeLoggerFilter(Filter): A logging Filter that excludes records from a logger (or its children). """ - def filter(self, record): - # type: (logging.LogRecord) -> bool + def filter(self, record: logging.LogRecord) -> bool: # The base Filter class allows only records from a logger (or its # children). return not super().filter(record) -def setup_logging(verbosity, no_color, user_log_file): - # type: (int, bool, Optional[str]) -> int +def setup_logging(verbosity: int, no_color: bool, user_log_file: Optional[str]) -> int: """Configures and sets up all of the logging Returns the requested logging level, as its integer value. """ # Determine the level to be logging at. - if verbosity >= 1: - level = "DEBUG" + if verbosity >= 2: + level_number = logging.DEBUG + elif verbosity == 1: + level_number = VERBOSE elif verbosity == -1: - level = "WARNING" + level_number = logging.WARNING elif verbosity == -2: - level = "ERROR" + level_number = logging.ERROR elif verbosity <= -3: - level = "CRITICAL" + level_number = logging.CRITICAL else: - level = "INFO" + level_number = logging.INFO - level_number = getattr(logging, level) + level = logging.getLevelName(level_number) # The "root" logger should match the "console" level *unless* we also need # to log to a user log file. 
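# ---- illustrative sketch (editor's aside, not part of the diff) ----
# The setup_logging() hunk above now maps the accumulated -v/-q counts
# onto numeric levels, including a custom VERBOSE level that sits between
# DEBUG and INFO (pip defines it in pip._internal.utils._log; the value
# 15 used here is an assumption for illustration). A self-contained
# sketch of that mapping:
import logging

VERBOSE = 15  # assumed value, halfway between DEBUG (10) and INFO (20)
logging.addLevelName(VERBOSE, "VERBOSE")


def level_for_verbosity(verbosity: int) -> int:
    # -v -v => DEBUG, -v => VERBOSE, default => INFO,
    # -q => WARNING, -q -q => ERROR, -q -q -q (or more) => CRITICAL
    if verbosity >= 2:
        return logging.DEBUG
    if verbosity == 1:
        return VERBOSE
    if verbosity == -1:
        return logging.WARNING
    if verbosity == -2:
        return logging.ERROR
    if verbosity <= -3:
        return logging.CRITICAL
    return logging.INFO


# logging.getLogger("demo").log(VERBOSE, "shown at -v but not by default")
# ---------------------------------------------------------------------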
@@ -305,7 +265,7 @@ def setup_logging(verbosity, no_color, user_log_file): "stderr": "ext://sys.stderr", } handler_classes = { - "stream": "pip._internal.utils.logging.ColorizedStreamHandler", + "stream": "pip._internal.utils.logging.RichPipStreamHandler", "file": "pip._internal.utils.logging.BetterRotatingFileHandler", } handlers = ["console", "console_errors", "console_subprocess"] + ( @@ -363,8 +323,8 @@ def setup_logging(verbosity, no_color, user_log_file): "console_subprocess": { "level": level, "class": handler_classes["stream"], - "no_color": no_color, "stream": log_streams["stderr"], + "no_color": no_color, "filters": ["restrict_to_subprocess"], "formatter": "indent", }, @@ -372,6 +332,7 @@ def setup_logging(verbosity, no_color, user_log_file): "level": "DEBUG", "class": handler_classes["file"], "filename": additional_log_file, + "encoding": "utf-8", "delay": True, "formatter": "indent_with_timestamp", }, diff --git a/env/Lib/site-packages/pip/_internal/utils/misc.py b/env/Lib/site-packages/pip/_internal/utils/misc.py index a4ad35be61c28cb70e5010007c169c0e490131f7..bd191c4e14f389d6d0f799dfef9c5c0221a8c568 100644 --- a/env/Lib/site-packages/pip/_internal/utils/misc.py +++ b/env/Lib/site-packages/pip/_internal/utils/misc.py @@ -1,6 +1,3 @@ -# The following comment should be removed at some point in the future. -# mypy: strict-optional=False - import contextlib import errno import getpass @@ -12,17 +9,18 @@ import posixpath import shutil import stat import sys +import sysconfig import urllib.parse from io import StringIO from itertools import filterfalse, tee, zip_longest from types import TracebackType from typing import ( Any, - AnyStr, BinaryIO, Callable, - Container, ContextManager, + Dict, + Generator, Iterable, Iterator, List, @@ -31,20 +29,18 @@ from typing import ( Tuple, Type, TypeVar, + Union, cast, ) -from pip._vendor.pkg_resources import Distribution +from pip._vendor.pyproject_hooks import BuildBackendHookCaller from pip._vendor.tenacity import retry, stop_after_delay, wait_fixed from pip import __version__ -from pip._internal.exceptions import CommandError -from pip._internal.locations import get_major_minor_version, site_packages, user_site -from pip._internal.utils.compat import WINDOWS, stdlib_pkgs -from pip._internal.utils.virtualenv import ( - running_under_virtualenv, - virtualenv_no_global, -) +from pip._internal.exceptions import CommandError, ExternallyManagedEnvironment +from pip._internal.locations import get_major_minor_version +from pip._internal.utils.compat import WINDOWS +from pip._internal.utils.virtualenv import running_under_virtualenv __all__ = [ "rmtree", @@ -60,9 +56,10 @@ __all__ = [ "captured_stdout", "ensure_dir", "remove_auth_from_url", + "check_externally_managed", + "ConfiguredBuildBackendHookCaller", ] - logger = logging.getLogger(__name__) T = TypeVar("T") @@ -71,8 +68,7 @@ VersionInfo = Tuple[int, int, int] NetlocTuple = Tuple[str, Tuple[Optional[str], Optional[str]]] -def get_pip_version(): - # type: () -> str +def get_pip_version() -> str: pip_pkg_dir = os.path.join(os.path.dirname(__file__), "..", "..") pip_pkg_dir = os.path.abspath(pip_pkg_dir) @@ -83,8 +79,7 @@ def get_pip_version(): ) -def normalize_version_info(py_version_info): - # type: (Tuple[int, ...]) -> Tuple[int, int, int] +def normalize_version_info(py_version_info: Tuple[int, ...]) -> Tuple[int, int, int]: """ Convert a tuple of ints representing a Python version to one of length three. 
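# ---- illustrative sketch (editor's aside, not part of the diff) ----
# What normalize_version_info() in the hunk above does: pad or truncate
# an arbitrary-length version tuple to exactly (major, minor, micro),
# filling missing trailing parts with zeros. A standalone sketch (the
# short name normalize is hypothetical):
from typing import Tuple


def normalize(py_version_info: Tuple[int, ...]) -> Tuple[int, int, int]:
    if len(py_version_info) < 3:
        # Pad short tuples with zeros, e.g. (3,) -> (3, 0, 0).
        py_version_info += (3 - len(py_version_info)) * (0,)
    elif len(py_version_info) > 3:
        # Drop anything past the micro component.
        py_version_info = py_version_info[:3]
    return (py_version_info[0], py_version_info[1], py_version_info[2])


# normalize((3,))          -> (3, 0, 0)
# normalize((3, 9, 1, 2))  -> (3, 9, 1)
# ---------------------------------------------------------------------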
@@ -103,8 +98,7 @@ def normalize_version_info(py_version_info): return cast("VersionInfo", py_version_info) -def ensure_dir(path): - # type: (AnyStr) -> None +def ensure_dir(path: str) -> None: """os.path.makedirs without EEXIST.""" try: os.makedirs(path) @@ -114,8 +108,7 @@ def ensure_dir(path): raise -def get_prog(): - # type: () -> str +def get_prog() -> str: try: prog = os.path.basename(sys.argv[0]) if prog in ("__main__.py", "-c"): @@ -128,15 +121,18 @@ def get_prog(): # Retry every half second for up to 3 seconds -# Tenacity raises RetryError by default, explictly raise the original exception +# Tenacity raises RetryError by default, explicitly raise the original exception @retry(reraise=True, stop=stop_after_delay(3), wait=wait_fixed(0.5)) -def rmtree(dir, ignore_errors=False): - # type: (AnyStr, bool) -> None - shutil.rmtree(dir, ignore_errors=ignore_errors, onerror=rmtree_errorhandler) +def rmtree(dir: str, ignore_errors: bool = False) -> None: + if sys.version_info >= (3, 12): + shutil.rmtree(dir, ignore_errors=ignore_errors, onexc=rmtree_errorhandler) + else: + shutil.rmtree(dir, ignore_errors=ignore_errors, onerror=rmtree_errorhandler) -def rmtree_errorhandler(func, path, exc_info): - # type: (Callable[..., Any], str, ExcInfo) -> None +def rmtree_errorhandler( + func: Callable[..., Any], path: str, exc_info: Union[ExcInfo, BaseException] +) -> None: """On Windows, the files in .svn are read-only, so when rmtree() tries to remove them, an exception is thrown. We catch that here, remove the read-only attribute, and hopefully continue without problems.""" @@ -156,8 +152,7 @@ def rmtree_errorhandler(func, path, exc_info): raise -def display_path(path): - # type: (str) -> str +def display_path(path: str) -> str: """Gives the display value for a given path, making it relative to cwd if possible.""" path = os.path.normcase(os.path.abspath(path)) @@ -166,8 +161,7 @@ def display_path(path): return path -def backup_dir(dir, ext=".bak"): - # type: (str, str) -> str +def backup_dir(dir: str, ext: str = ".bak") -> str: """Figure out the name of a directory to back up the given dir to (adding .bak, .bak2, etc)""" n = 1 @@ -178,16 +172,14 @@ def backup_dir(dir, ext=".bak"): return dir + extension -def ask_path_exists(message, options): - # type: (str, Iterable[str]) -> str +def ask_path_exists(message: str, options: Iterable[str]) -> str: for action in os.environ.get("PIP_EXISTS_ACTION", "").split(): if action in options: return action return ask(message, options) -def _check_no_input(message): - # type: (str) -> None +def _check_no_input(message: str) -> None: """Raise an error if no input is allowed.""" if os.environ.get("PIP_NO_INPUT"): raise Exception( @@ -195,8 +187,7 @@ def _check_no_input(message): ) -def ask(message, options): - # type: (str, Iterable[str]) -> str +def ask(message: str, options: Iterable[str]) -> str: """Ask the message interactively, with the given possible responses""" while 1: _check_no_input(message) @@ -211,22 +202,19 @@ def ask(message, options): return response -def ask_input(message): - # type: (str) -> str +def ask_input(message: str) -> str: """Ask for input interactively.""" _check_no_input(message) return input(message) -def ask_password(message): - # type: (str) -> str +def ask_password(message: str) -> str: """Ask for a password interactively.""" _check_no_input(message) return getpass.getpass(message) -def strtobool(val): - # type: (str) -> int +def strtobool(val: str) -> int: """Convert a string representation of truth to true (1) or false (0). 
True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values @@ -242,8 +230,7 @@ def strtobool(val): raise ValueError(f"invalid truth value {val!r}") -def format_size(bytes): - # type: (float) -> str +def format_size(bytes: float) -> str: if bytes > 1000 * 1000: return "{:.1f} MB".format(bytes / 1000.0 / 1000) elif bytes > 10 * 1000: @@ -254,8 +241,7 @@ def format_size(bytes): return "{} bytes".format(int(bytes)) -def tabulate(rows): - # type: (Iterable[Iterable[Any]]) -> Tuple[List[str], List[int]] +def tabulate(rows: Iterable[Iterable[Any]]) -> Tuple[List[str], List[int]]: """Return a list of formatted rows and a list of column sizes. For example:: @@ -270,17 +256,25 @@ def tabulate(rows): def is_installable_dir(path: str) -> bool: - """Is path is a directory containing pyproject.toml, setup.cfg or setup.py?""" + """Is path is a directory containing pyproject.toml or setup.py? + + If pyproject.toml exists, this is a PEP 517 project. Otherwise we look for + a legacy setuptools layout by identifying setup.py. We don't check for the + setup.cfg because using it without setup.py is only available for PEP 517 + projects, which are already covered by the pyproject.toml check. + """ if not os.path.isdir(path): return False - return any( - os.path.isfile(os.path.join(path, signifier)) - for signifier in ("pyproject.toml", "setup.cfg", "setup.py") - ) + if os.path.isfile(os.path.join(path, "pyproject.toml")): + return True + if os.path.isfile(os.path.join(path, "setup.py")): + return True + return False -def read_chunks(file, size=io.DEFAULT_BUFFER_SIZE): - # type: (BinaryIO, int) -> Iterator[bytes] +def read_chunks( + file: BinaryIO, size: int = io.DEFAULT_BUFFER_SIZE +) -> Generator[bytes, None, None]: """Yield pieces of data from a file-like object until EOF.""" while True: chunk = file.read(size) @@ -289,8 +283,7 @@ def read_chunks(file, size=io.DEFAULT_BUFFER_SIZE): yield chunk -def normalize_path(path, resolve_symlinks=True): - # type: (str, bool) -> str +def normalize_path(path: str, resolve_symlinks: bool = True) -> str: """ Convert a path to its canonical, case-normalized, absolute version. @@ -303,8 +296,7 @@ def normalize_path(path, resolve_symlinks=True): return os.path.normcase(path) -def splitext(path): - # type: (str) -> Tuple[str, str] +def splitext(path: str) -> Tuple[str, str]: """Like os.path.splitext, but take off .tar too""" base, ext = posixpath.splitext(path) if base.lower().endswith(".tar"): @@ -313,8 +305,7 @@ def splitext(path): return base, ext -def renames(old, new): - # type: (str, str) -> None +def renames(old: str, new: str) -> None: """Like os.renames(), but handles renaming across devices.""" # Implementation borrowed from os.renames(). head, tail = os.path.split(new) @@ -331,8 +322,7 @@ def renames(old, new): pass -def is_local(path): - # type: (str) -> bool +def is_local(path: str) -> bool: """ Return True if path is within sys.prefix, if we're running in a virtualenv. @@ -346,171 +336,28 @@ def is_local(path): return path.startswith(normalize_path(sys.prefix)) -def dist_is_local(dist): - # type: (Distribution) -> bool - """ - Return True if given Distribution object is installed locally - (i.e. within current virtualenv). - - Always True if we're not in a virtualenv. - - """ - return is_local(dist_location(dist)) - - -def dist_in_usersite(dist): - # type: (Distribution) -> bool - """ - Return True if given Distribution is installed in user site. 
- """ - return dist_location(dist).startswith(normalize_path(user_site)) - - -def dist_in_site_packages(dist): - # type: (Distribution) -> bool - """ - Return True if given Distribution is installed in - sysconfig.get_python_lib(). - """ - return dist_location(dist).startswith(normalize_path(site_packages)) - - -def dist_is_editable(dist): - # type: (Distribution) -> bool - """ - Return True if given Distribution is an editable install. - """ - for path_item in sys.path: - egg_link = os.path.join(path_item, dist.project_name + ".egg-link") - if os.path.isfile(egg_link): - return True - return False - - -def get_installed_distributions( - local_only=True, # type: bool - skip=stdlib_pkgs, # type: Container[str] - include_editables=True, # type: bool - editables_only=False, # type: bool - user_only=False, # type: bool - paths=None, # type: Optional[List[str]] -): - # type: (...) -> List[Distribution] - """Return a list of installed Distribution objects. - - Left for compatibility until direct pkg_resources uses are refactored out. - """ - from pip._internal.metadata import get_default_environment, get_environment - from pip._internal.metadata.pkg_resources import Distribution as _Dist - - if paths is None: - env = get_default_environment() - else: - env = get_environment(paths) - dists = env.iter_installed_distributions( - local_only=local_only, - skip=skip, - include_editables=include_editables, - editables_only=editables_only, - user_only=user_only, - ) - return [cast(_Dist, dist)._dist for dist in dists] - - -def get_distribution(req_name): - # type: (str) -> Optional[Distribution] - """Given a requirement name, return the installed Distribution object. - - This searches from *all* distributions available in the environment, to - match the behavior of ``pkg_resources.get_distribution()``. - - Left for compatibility until direct pkg_resources uses are refactored out. - """ - from pip._internal.metadata import get_default_environment - from pip._internal.metadata.pkg_resources import Distribution as _Dist - - dist = get_default_environment().get_distribution(req_name) - if dist is None: - return None - return cast(_Dist, dist)._dist - - -def egg_link_path(dist): - # type: (Distribution) -> Optional[str] - """ - Return the path for the .egg-link file if it exists, otherwise, None. - - There's 3 scenarios: - 1) not in a virtualenv - try to find in site.USER_SITE, then site_packages - 2) in a no-global virtualenv - try to find in site_packages - 3) in a yes-global virtualenv - try to find in site_packages, then site.USER_SITE - (don't look in global location) - - For #1 and #3, there could be odd cases, where there's an egg-link in 2 - locations. - - This method will just return the first one found. - """ - sites = [] - if running_under_virtualenv(): - sites.append(site_packages) - if not virtualenv_no_global() and user_site: - sites.append(user_site) - else: - if user_site: - sites.append(user_site) - sites.append(site_packages) - - for site in sites: - egglink = os.path.join(site, dist.project_name) + ".egg-link" - if os.path.isfile(egglink): - return egglink - return None - - -def dist_location(dist): - # type: (Distribution) -> str - """ - Get the site-packages location of this distribution. Generally - this is dist.location, except in the case of develop-installed - packages, where dist.location is the source code location, and we - want to know where the egg-link file is. - - The returned location is normalized (in particular, with symlinks removed). 
- """ - egg_link = egg_link_path(dist) - if egg_link: - return normalize_path(egg_link) - return normalize_path(dist.location) - - -def write_output(msg, *args): - # type: (Any, Any) -> None +def write_output(msg: Any, *args: Any) -> None: logger.info(msg, *args) class StreamWrapper(StringIO): - orig_stream = None # type: TextIO + orig_stream: TextIO @classmethod - def from_stream(cls, orig_stream): - # type: (TextIO) -> StreamWrapper - cls.orig_stream = orig_stream - return cls() + def from_stream(cls, orig_stream: TextIO) -> "StreamWrapper": + ret = cls() + ret.orig_stream = orig_stream + return ret # compileall.compile_dir() needs stdout.encoding to print to stdout - # https://github.com/python/mypy/issues/4125 + # type ignore is because TextIOBase.encoding is writeable @property - def encoding(self): # type: ignore + def encoding(self) -> str: # type: ignore return self.orig_stream.encoding @contextlib.contextmanager -def captured_output(stream_name): - # type: (str) -> Iterator[StreamWrapper] +def captured_output(stream_name: str) -> Generator[StreamWrapper, None, None]: """Return a context manager used by captured_stdout/stdin/stderr that temporarily replaces the sys stream *stream_name* with a StringIO. @@ -524,8 +371,7 @@ def captured_output(stream_name): setattr(sys, stream_name, orig_stdout) -def captured_stdout(): - # type: () -> ContextManager[StreamWrapper] +def captured_stdout() -> ContextManager[StreamWrapper]: """Capture the output of sys.stdout: with captured_stdout() as stdout: @@ -537,8 +383,7 @@ def captured_stdout(): return captured_output("stdout") -def captured_stderr(): - # type: () -> ContextManager[StreamWrapper] +def captured_stderr() -> ContextManager[StreamWrapper]: """ See captured_stdout(). """ @@ -546,16 +391,14 @@ def captured_stderr(): # Simulates an enum -def enum(*sequential, **named): - # type: (*Any, **Any) -> Type[Any] +def enum(*sequential: Any, **named: Any) -> Type[Any]: enums = dict(zip(sequential, range(len(sequential))), **named) reverse = {value: key for key, value in enums.items()} enums["reverse_mapping"] = reverse return type("Enum", (), enums) -def build_netloc(host, port): - # type: (str, Optional[int]) -> str +def build_netloc(host: str, port: Optional[int]) -> str: """ Build a netloc from a host-port pair """ @@ -567,8 +410,7 @@ def build_netloc(host, port): return f"{host}:{port}" -def build_url_from_netloc(netloc, scheme="https"): - # type: (str, str) -> str +def build_url_from_netloc(netloc: str, scheme: str = "https") -> str: """ Build a full URL from a netloc. """ @@ -578,8 +420,7 @@ def build_url_from_netloc(netloc, scheme="https"): return f"{scheme}://{netloc}" -def parse_netloc(netloc): - # type: (str) -> Tuple[str, Optional[int]] +def parse_netloc(netloc: str) -> Tuple[Optional[str], Optional[int]]: """ Return the host-port pair from a netloc. """ @@ -588,8 +429,7 @@ def parse_netloc(netloc): return parsed.hostname, parsed.port -def split_auth_from_netloc(netloc): - # type: (str) -> NetlocTuple +def split_auth_from_netloc(netloc: str) -> NetlocTuple: """ Parse out and remove the auth information from a netloc. @@ -602,7 +442,7 @@ def split_auth_from_netloc(netloc): # behaves if more than one @ is present (which can be checked using # the password attribute of urlsplit()'s return value). 
auth, netloc = netloc.rsplit("@", 1) - pw = None # type: Optional[str] + pw: Optional[str] = None if ":" in auth: # Split from the left because that's how urllib.parse.urlsplit() # behaves if more than one : is present (which again can be checked @@ -618,8 +458,7 @@ def split_auth_from_netloc(netloc): return netloc, (user, pw) -def redact_netloc(netloc): - # type: (str) -> str +def redact_netloc(netloc: str) -> str: """ Replace the sensitive data in a netloc with "****", if it exists. @@ -641,8 +480,9 @@ def redact_netloc(netloc): ) -def _transform_url(url, transform_netloc): - # type: (str, Callable[[str], Tuple[Any, ...]]) -> Tuple[str, NetlocTuple] +def _transform_url( + url: str, transform_netloc: Callable[[str], Tuple[Any, ...]] +) -> Tuple[str, NetlocTuple]: """Transform and replace netloc in a url. transform_netloc is a function taking the netloc and returning a @@ -660,18 +500,17 @@ def _transform_url(url, transform_netloc): return surl, cast("NetlocTuple", netloc_tuple) -def _get_netloc(netloc): - # type: (str) -> NetlocTuple +def _get_netloc(netloc: str) -> NetlocTuple: return split_auth_from_netloc(netloc) -def _redact_netloc(netloc): - # type: (str) -> Tuple[str,] +def _redact_netloc(netloc: str) -> Tuple[str]: return (redact_netloc(netloc),) -def split_auth_netloc_from_url(url): - # type: (str) -> Tuple[str, str, Tuple[str, str]] +def split_auth_netloc_from_url( + url: str, +) -> Tuple[str, str, Tuple[Optional[str], Optional[str]]]: """ Parse a url into separate netloc, auth, and url with no auth. @@ -681,41 +520,31 @@ def split_auth_netloc_from_url(url): return url_without_auth, netloc, auth -def remove_auth_from_url(url): - # type: (str) -> str +def remove_auth_from_url(url: str) -> str: """Return a copy of url with 'username:password@' removed.""" # username/pass params are passed to subversion through flags # and are not recognized in the url. return _transform_url(url, _get_netloc)[0] -def redact_auth_from_url(url): - # type: (str) -> str +def redact_auth_from_url(url: str) -> str: """Replace the password in a given url with ****.""" return _transform_url(url, _redact_netloc)[0] class HiddenText: - def __init__( - self, - secret, # type: str - redacted, # type: str - ): - # type: (...) -> None + def __init__(self, secret: str, redacted: str) -> None: self.secret = secret self.redacted = redacted - def __repr__(self): - # type: (...) -> str + def __repr__(self) -> str: return "<HiddenText {!r}>".format(str(self)) - def __str__(self): - # type: (...) -> str + def __str__(self) -> str: return self.redacted # This is useful for testing. - def __eq__(self, other): - # type: (Any) -> bool + def __eq__(self, other: Any) -> bool: if type(self) != type(other): return False @@ -724,28 +553,25 @@ class HiddenText: return self.secret == other.secret -def hide_value(value): - # type: (str) -> HiddenText +def hide_value(value: str) -> HiddenText: return HiddenText(value, redacted="****") -def hide_url(url): - # type: (str) -> HiddenText +def hide_url(url: str) -> HiddenText: redacted = redact_auth_from_url(url) return HiddenText(url, redacted=redacted) -def protect_pip_from_modification_on_windows(modifying_pip): - # type: (bool) -> None +def protect_pip_from_modification_on_windows(modifying_pip: bool) -> None: """Protection of pip.exe from modification on Windows On Windows, any operation modifying pip should be run as: python -m pip ... 
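The split/redact pair above leans on two directional rules chosen to match urllib.parse.urlsplit(): take the auth portion off the last "@", then split user from password on the first ":". Demonstrated standalone:

netloc = "user:p@ss@example.com"
auth, host = netloc.rsplit("@", 1)   # auth keeps any embedded "@"
user, _, pw = auth.partition(":")    # the first ":" wins
print(host, (user, pw))              # example.com ('user', 'p@ss')
print(f"{user}:****@{host}")         # user:****@example.com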
""" pip_names = [ - "pip.exe", - "pip{}.exe".format(sys.version_info[0]), - "pip{}.{}.exe".format(*sys.version_info[:2]), + "pip", + f"pip{sys.version_info.major}", + f"pip{sys.version_info.major}.{sys.version_info.minor}", ] # See https://github.com/pypa/pip/issues/1299 for more discussion @@ -762,14 +588,27 @@ def protect_pip_from_modification_on_windows(modifying_pip): ) -def is_console_interactive(): - # type: () -> bool +def check_externally_managed() -> None: + """Check whether the current environment is externally managed. + + If the ``EXTERNALLY-MANAGED`` config file is found, the current environment + is considered externally managed, and an ExternallyManagedEnvironment is + raised. + """ + if running_under_virtualenv(): + return + marker = os.path.join(sysconfig.get_path("stdlib"), "EXTERNALLY-MANAGED") + if not os.path.isfile(marker): + return + raise ExternallyManagedEnvironment.from_config(marker) + + +def is_console_interactive() -> bool: """Is this console interactive?""" return sys.stdin is not None and sys.stdin.isatty() -def hash_file(path, blocksize=1 << 20): - # type: (str, int) -> Tuple[Any, int] +def hash_file(path: str, blocksize: int = 1 << 20) -> Tuple[Any, int]: """Return (hash, length) for path using hashlib.sha256()""" h = hashlib.sha256() @@ -781,21 +620,7 @@ def hash_file(path, blocksize=1 << 20): return h, length -def is_wheel_installed(): - # type: () -> bool - """ - Return whether the wheel package is installed. - """ - try: - import wheel # noqa: F401 - except ImportError: - return False - - return True - - -def pairwise(iterable): - # type: (Iterable[Any]) -> Iterator[Tuple[Any, Any]] +def pairwise(iterable: Iterable[Any]) -> Iterator[Tuple[Any, Any]]: """ Return paired elements. @@ -807,10 +632,9 @@ def pairwise(iterable): def partition( - pred, # type: Callable[[T], bool] - iterable, # type: Iterable[T] -): - # type: (...) 
-> Tuple[Iterable[T], Iterable[T]] + pred: Callable[[T], bool], + iterable: Iterable[T], +) -> Tuple[Iterable[T], Iterable[T]]: """ Use a predicate to partition entries into false entries and true entries, like @@ -819,3 +643,93 @@ def partition( """ t1, t2 = tee(iterable) return filterfalse(pred, t1), filter(pred, t2) + + +class ConfiguredBuildBackendHookCaller(BuildBackendHookCaller): + def __init__( + self, + config_holder: Any, + source_dir: str, + build_backend: str, + backend_path: Optional[str] = None, + runner: Optional[Callable[..., None]] = None, + python_executable: Optional[str] = None, + ): + super().__init__( + source_dir, build_backend, backend_path, runner, python_executable + ) + self.config_holder = config_holder + + def build_wheel( + self, + wheel_directory: str, + config_settings: Optional[Dict[str, Union[str, List[str]]]] = None, + metadata_directory: Optional[str] = None, + ) -> str: + cs = self.config_holder.config_settings + return super().build_wheel( + wheel_directory, config_settings=cs, metadata_directory=metadata_directory + ) + + def build_sdist( + self, + sdist_directory: str, + config_settings: Optional[Dict[str, Union[str, List[str]]]] = None, + ) -> str: + cs = self.config_holder.config_settings + return super().build_sdist(sdist_directory, config_settings=cs) + + def build_editable( + self, + wheel_directory: str, + config_settings: Optional[Dict[str, Union[str, List[str]]]] = None, + metadata_directory: Optional[str] = None, + ) -> str: + cs = self.config_holder.config_settings + return super().build_editable( + wheel_directory, config_settings=cs, metadata_directory=metadata_directory + ) + + def get_requires_for_build_wheel( + self, config_settings: Optional[Dict[str, Union[str, List[str]]]] = None + ) -> List[str]: + cs = self.config_holder.config_settings + return super().get_requires_for_build_wheel(config_settings=cs) + + def get_requires_for_build_sdist( + self, config_settings: Optional[Dict[str, Union[str, List[str]]]] = None + ) -> List[str]: + cs = self.config_holder.config_settings + return super().get_requires_for_build_sdist(config_settings=cs) + + def get_requires_for_build_editable( + self, config_settings: Optional[Dict[str, Union[str, List[str]]]] = None + ) -> List[str]: + cs = self.config_holder.config_settings + return super().get_requires_for_build_editable(config_settings=cs) + + def prepare_metadata_for_build_wheel( + self, + metadata_directory: str, + config_settings: Optional[Dict[str, Union[str, List[str]]]] = None, + _allow_fallback: bool = True, + ) -> str: + cs = self.config_holder.config_settings + return super().prepare_metadata_for_build_wheel( + metadata_directory=metadata_directory, + config_settings=cs, + _allow_fallback=_allow_fallback, + ) + + def prepare_metadata_for_build_editable( + self, + metadata_directory: str, + config_settings: Optional[Dict[str, Union[str, List[str]]]] = None, + _allow_fallback: bool = True, + ) -> str: + cs = self.config_holder.config_settings + return super().prepare_metadata_for_build_editable( + metadata_directory=metadata_directory, + config_settings=cs, + _allow_fallback=_allow_fallback, + ) diff --git a/env/Lib/site-packages/pip/_internal/utils/models.py b/env/Lib/site-packages/pip/_internal/utils/models.py index 0e02bc7a5b1748c86b414bdd019e607d5857ce07..b6bb21a8b26680b38c3af8278ed139b6628356c5 100644 --- a/env/Lib/site-packages/pip/_internal/utils/models.py +++ b/env/Lib/site-packages/pip/_internal/utils/models.py @@ -10,37 +10,29 @@ class KeyBasedCompareMixin: __slots__ = 
["_compare_key", "_defining_class"] - def __init__(self, key, defining_class): - # type: (Any, Type[KeyBasedCompareMixin]) -> None + def __init__(self, key: Any, defining_class: Type["KeyBasedCompareMixin"]) -> None: self._compare_key = key self._defining_class = defining_class - def __hash__(self): - # type: () -> int + def __hash__(self) -> int: return hash(self._compare_key) - def __lt__(self, other): - # type: (Any) -> bool + def __lt__(self, other: Any) -> bool: return self._compare(other, operator.__lt__) - def __le__(self, other): - # type: (Any) -> bool + def __le__(self, other: Any) -> bool: return self._compare(other, operator.__le__) - def __gt__(self, other): - # type: (Any) -> bool + def __gt__(self, other: Any) -> bool: return self._compare(other, operator.__gt__) - def __ge__(self, other): - # type: (Any) -> bool + def __ge__(self, other: Any) -> bool: return self._compare(other, operator.__ge__) - def __eq__(self, other): - # type: (Any) -> bool + def __eq__(self, other: Any) -> bool: return self._compare(other, operator.__eq__) - def _compare(self, other, method): - # type: (Any, Callable[[Any, Any], bool]) -> bool + def _compare(self, other: Any, method: Callable[[Any, Any], bool]) -> bool: if not isinstance(other, self._defining_class): return NotImplemented diff --git a/env/Lib/site-packages/pip/_internal/utils/packaging.py b/env/Lib/site-packages/pip/_internal/utils/packaging.py index 3f9dbd3b7a0b86fca0df364c7b0795ac6792a870..b9f6af4d17410ce7e1d573c41a1f04dd18ae275e 100644 --- a/env/Lib/site-packages/pip/_internal/utils/packaging.py +++ b/env/Lib/site-packages/pip/_internal/utils/packaging.py @@ -1,20 +1,19 @@ +import functools import logging -from email.message import Message -from email.parser import FeedParser -from typing import Optional, Tuple +import re +from typing import NewType, Optional, Tuple, cast -from pip._vendor import pkg_resources from pip._vendor.packaging import specifiers, version -from pip._vendor.pkg_resources import Distribution +from pip._vendor.packaging.requirements import Requirement -from pip._internal.exceptions import NoneMetadataError -from pip._internal.utils.misc import display_path +NormalizedExtra = NewType("NormalizedExtra", str) logger = logging.getLogger(__name__) -def check_requires_python(requires_python, version_info): - # type: (Optional[str], Tuple[int, ...]) -> bool +def check_requires_python( + requires_python: Optional[str], version_info: Tuple[int, ...] +) -> bool: """ Check if the given Python version matches a "Requires-Python" specifier. @@ -35,55 +34,24 @@ def check_requires_python(requires_python, version_info): return python_version in requires_python_specifier -def get_metadata(dist): - # type: (Distribution) -> Message - """ - :raises NoneMetadataError: if the distribution reports `has_metadata()` - True but `get_metadata()` returns None. 
- """ - metadata_name = "METADATA" - if isinstance(dist, pkg_resources.DistInfoDistribution) and dist.has_metadata( - metadata_name - ): - metadata = dist.get_metadata(metadata_name) - elif dist.has_metadata("PKG-INFO"): - metadata_name = "PKG-INFO" - metadata = dist.get_metadata(metadata_name) - else: - logger.warning("No metadata found in %s", display_path(dist.location)) - metadata = "" +@functools.lru_cache(maxsize=512) +def get_requirement(req_string: str) -> Requirement: + """Construct a packaging.Requirement object with caching""" + # Parsing requirement strings is expensive, and is also expected to happen + # with a low diversity of different arguments (at least relative the number + # constructed). This method adds a cache to requirement object creation to + # minimize repeated parsing of the same string to construct equivalent + # Requirement objects. + return Requirement(req_string) - if metadata is None: - raise NoneMetadataError(dist, metadata_name) - feed_parser = FeedParser() - # The following line errors out if with a "NoneType" TypeError if - # passed metadata=None. - feed_parser.feed(metadata) - return feed_parser.close() +def safe_extra(extra: str) -> NormalizedExtra: + """Convert an arbitrary string to a standard 'extra' name + Any runs of non-alphanumeric characters are replaced with a single '_', + and the result is always lowercased. -def get_requires_python(dist): - # type: (pkg_resources.Distribution) -> Optional[str] + This function is duplicated from ``pkg_resources``. Note that this is not + the same to either ``canonicalize_name`` or ``_egg_link_name``. """ - Return the "Requires-Python" metadata for a distribution, or None - if not present. - """ - pkg_info_dict = get_metadata(dist) - requires_python = pkg_info_dict.get("Requires-Python") - - if requires_python is not None: - # Convert to a str to satisfy the type checker, since requires_python - # can be a Header object. - requires_python = str(requires_python) - - return requires_python - - -def get_installer(dist): - # type: (Distribution) -> str - if dist.has_metadata("INSTALLER"): - for line in dist.get_metadata_lines("INSTALLER"): - if line.strip(): - return line.strip() - return "" + return cast(NormalizedExtra, re.sub("[^A-Za-z0-9.-]+", "_", extra).lower()) diff --git a/env/Lib/site-packages/pip/_internal/utils/parallel.py b/env/Lib/site-packages/pip/_internal/utils/parallel.py deleted file mode 100644 index de91dc8abc88b86fb4186a1a824f684861f04ff5..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_internal/utils/parallel.py +++ /dev/null @@ -1,101 +0,0 @@ -"""Convenient parallelization of higher order functions. - -This module provides two helper functions, with appropriate fallbacks on -Python 2 and on systems lacking support for synchronization mechanisms: - -- map_multiprocess -- map_multithread - -These helpers work like Python 3's map, with two differences: - -- They don't guarantee the order of processing of - the elements of the iterable. -- The underlying process/thread pools chop the iterable into - a number of chunks, so that for very long iterables using - a large value for chunksize can make the job complete much faster - than using the default value of 1. 
-""" - -__all__ = ["map_multiprocess", "map_multithread"] - -from contextlib import contextmanager -from multiprocessing import Pool as ProcessPool -from multiprocessing import pool -from multiprocessing.dummy import Pool as ThreadPool -from typing import Callable, Iterable, Iterator, TypeVar, Union - -from pip._vendor.requests.adapters import DEFAULT_POOLSIZE - -Pool = Union[pool.Pool, pool.ThreadPool] -S = TypeVar("S") -T = TypeVar("T") - -# On platforms without sem_open, multiprocessing[.dummy] Pool -# cannot be created. -try: - import multiprocessing.synchronize # noqa -except ImportError: - LACK_SEM_OPEN = True -else: - LACK_SEM_OPEN = False - -# Incredibly large timeout to work around bpo-8296 on Python 2. -TIMEOUT = 2000000 - - -@contextmanager -def closing(pool): - # type: (Pool) -> Iterator[Pool] - """Return a context manager making sure the pool closes properly.""" - try: - yield pool - finally: - # For Pool.imap*, close and join are needed - # for the returned iterator to begin yielding. - pool.close() - pool.join() - pool.terminate() - - -def _map_fallback(func, iterable, chunksize=1): - # type: (Callable[[S], T], Iterable[S], int) -> Iterator[T] - """Make an iterator applying func to each element in iterable. - - This function is the sequential fallback either on Python 2 - where Pool.imap* doesn't react to KeyboardInterrupt - or when sem_open is unavailable. - """ - return map(func, iterable) - - -def _map_multiprocess(func, iterable, chunksize=1): - # type: (Callable[[S], T], Iterable[S], int) -> Iterator[T] - """Chop iterable into chunks and submit them to a process pool. - - For very long iterables using a large value for chunksize can make - the job complete much faster than using the default value of 1. - - Return an unordered iterator of the results. - """ - with closing(ProcessPool()) as pool: - return pool.imap_unordered(func, iterable, chunksize) - - -def _map_multithread(func, iterable, chunksize=1): - # type: (Callable[[S], T], Iterable[S], int) -> Iterator[T] - """Chop iterable into chunks and submit them to a thread pool. - - For very long iterables using a large value for chunksize can make - the job complete much faster than using the default value of 1. - - Return an unordered iterator of the results. - """ - with closing(ThreadPool(DEFAULT_POOLSIZE)) as pool: - return pool.imap_unordered(func, iterable, chunksize) - - -if LACK_SEM_OPEN: - map_multiprocess = map_multithread = _map_fallback -else: - map_multiprocess = _map_multiprocess - map_multithread = _map_multithread diff --git a/env/Lib/site-packages/pip/_internal/utils/pkg_resources.py b/env/Lib/site-packages/pip/_internal/utils/pkg_resources.py deleted file mode 100644 index ee1eca3008159068c2f78226abc3b446b27c5fb9..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_internal/utils/pkg_resources.py +++ /dev/null @@ -1,40 +0,0 @@ -from typing import Dict, Iterable, List - -from pip._vendor.pkg_resources import yield_lines - - -class DictMetadata: - """IMetadataProvider that reads metadata files from a dictionary.""" - - def __init__(self, metadata): - # type: (Dict[str, bytes]) -> None - self._metadata = metadata - - def has_metadata(self, name): - # type: (str) -> bool - return name in self._metadata - - def get_metadata(self, name): - # type: (str) -> str - try: - return self._metadata[name].decode() - except UnicodeDecodeError as e: - # Mirrors handling done in pkg_resources.NullProvider. 
- e.reason += f" in {name} file" - raise - - def get_metadata_lines(self, name): - # type: (str) -> Iterable[str] - return yield_lines(self.get_metadata(name)) - - def metadata_isdir(self, name): - # type: (str) -> bool - return False - - def metadata_listdir(self, name): - # type: (str) -> List[str] - return [] - - def run_script(self, script_name, namespace): - # type: (str, str) -> None - pass diff --git a/env/Lib/site-packages/pip/_internal/utils/setuptools_build.py b/env/Lib/site-packages/pip/_internal/utils/setuptools_build.py index 4b8e4b359f3149bc45e18d293a2a2772a174e235..96d1b2460670e20ac92a5ade7a74b7ab1cba71d8 100644 --- a/env/Lib/site-packages/pip/_internal/utils/setuptools_build.py +++ b/env/Lib/site-packages/pip/_internal/utils/setuptools_build.py @@ -1,30 +1,57 @@ import sys +import textwrap from typing import List, Optional, Sequence # Shim to wrap setup.py invocation with setuptools -# -# We set sys.argv[0] to the path to the underlying setup.py file so -# setuptools / distutils don't take the path to the setup.py to be "-c" when -# invoking via the shim. This avoids e.g. the following manifest_maker -# warning: "warning: manifest_maker: standard file '-c' not found". -_SETUPTOOLS_SHIM = ( - "import io, os, sys, setuptools, tokenize; sys.argv[0] = {0!r}; __file__={0!r};" - "f = getattr(tokenize, 'open', open)(__file__) " - "if os.path.exists(__file__) " - "else io.StringIO('from setuptools import setup; setup()');" - "code = f.read().replace('\\r\\n', '\\n');" - "f.close();" - "exec(compile(code, __file__, 'exec'))" -) +# Note that __file__ is handled via two {!r} *and* %r, to ensure that paths on +# Windows are correctly handled (it should be "C:\\Users" not "C:\Users"). +_SETUPTOOLS_SHIM = textwrap.dedent( + """ + exec(compile(''' + # This is <pip-setuptools-caller> -- a caller that pip uses to run setup.py + # + # - It imports setuptools before invoking setup.py, to enable projects that directly + # import from `distutils.core` to work with newer packaging standards. + # - It provides a clear error message when setuptools is not installed. + # - It sets `sys.argv[0]` to the underlying `setup.py`, when invoking `setup.py` so + # setuptools doesn't think the script is `-c`. This avoids the following warning: + # manifest_maker: standard file '-c' not found". + # - It generates a shim setup.py, for handling setup.cfg-only projects. + import os, sys, tokenize + + try: + import setuptools + except ImportError as error: + print( + "ERROR: Can not execute `setup.py` since setuptools is not available in " + "the build environment.", + file=sys.stderr, + ) + sys.exit(1) + + __file__ = %r + sys.argv[0] = __file__ + + if os.path.exists(__file__): + filename = __file__ + with tokenize.open(__file__) as f: + setup_py_code = f.read() + else: + filename = "<auto-generated setuptools caller>" + setup_py_code = "from setuptools import setup; setup()" + + exec(compile(setup_py_code, filename, "exec")) + ''' % ({!r},), "<pip-setuptools-caller>", "exec")) + """ +).rstrip() def make_setuptools_shim_args( - setup_py_path, # type: str - global_options=None, # type: Sequence[str] - no_user_config=False, # type: bool - unbuffered_output=False, # type: bool -): - # type: (...) -> List[str] + setup_py_path: str, + global_options: Optional[Sequence[str]] = None, + no_user_config: bool = False, + unbuffered_output: bool = False, +) -> List[str]: """ Get setuptools command arguments with shim wrapped setup file invocation. 
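The docstring cut off above belongs to make_setuptools_shim_args(), which interpolates the shim source with the setup.py path and hands the result to python -c. A condensed, illustrative sketch of how the argv is assembled (TOY_SHIM stands in for the real shim above; the flag names mirror pip's):

import sys

TOY_SHIM = "import sys; __file__ = sys.argv[0] = %r; print('would exec', __file__)"

def make_shim_args(setup_py_path, no_user_config=False, unbuffered_output=False):
    args = [sys.executable]
    if unbuffered_output:
        args += ["-u"]
    args += ["-c", TOY_SHIM % setup_py_path]
    if no_user_config:
        args += ["--no-user-cfg"]
    return args

print(make_shim_args("/src/pkg/setup.py", unbuffered_output=True))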
@@ -46,12 +73,11 @@ def make_setuptools_shim_args( def make_setuptools_bdist_wheel_args( - setup_py_path, # type: str - global_options, # type: Sequence[str] - build_options, # type: Sequence[str] - destination_dir, # type: str -): - # type: (...) -> List[str] + setup_py_path: str, + global_options: Sequence[str], + build_options: Sequence[str], + destination_dir: str, +) -> List[str]: # NOTE: Eventually, we'd want to also -S to the flags here, when we're # isolating. Currently, it breaks Python in virtualenvs, because it # relies on site.py to find parts of the standard library outside the @@ -65,10 +91,9 @@ def make_setuptools_bdist_wheel_args( def make_setuptools_clean_args( - setup_py_path, # type: str - global_options, # type: Sequence[str] -): - # type: (...) -> List[str] + setup_py_path: str, + global_options: Sequence[str], +) -> List[str]: args = make_setuptools_shim_args( setup_py_path, global_options=global_options, unbuffered_output=True ) @@ -77,15 +102,14 @@ def make_setuptools_clean_args( def make_setuptools_develop_args( - setup_py_path, # type: str - global_options, # type: Sequence[str] - install_options, # type: Sequence[str] - no_user_config, # type: bool - prefix, # type: Optional[str] - home, # type: Optional[str] - use_user_site, # type: bool -): - # type: (...) -> List[str] + setup_py_path: str, + *, + global_options: Sequence[str], + no_user_config: bool, + prefix: Optional[str], + home: Optional[str], + use_user_site: bool, +) -> List[str]: assert not (use_user_site and prefix) args = make_setuptools_shim_args( @@ -96,8 +120,6 @@ def make_setuptools_develop_args( args += ["develop", "--no-deps"] - args += install_options - if prefix: args += ["--prefix", prefix] if home is not None: @@ -110,11 +132,10 @@ def make_setuptools_develop_args( def make_setuptools_egg_info_args( - setup_py_path, # type: str - egg_info_dir, # type: Optional[str] - no_user_config, # type: bool -): - # type: (...) -> List[str] + setup_py_path: str, + egg_info_dir: Optional[str], + no_user_config: bool, +) -> List[str]: args = make_setuptools_shim_args(setup_py_path, no_user_config=no_user_config) args += ["egg_info"] @@ -123,51 +144,3 @@ def make_setuptools_egg_info_args( args += ["--egg-base", egg_info_dir] return args - - -def make_setuptools_install_args( - setup_py_path, # type: str - global_options, # type: Sequence[str] - install_options, # type: Sequence[str] - record_filename, # type: str - root, # type: Optional[str] - prefix, # type: Optional[str] - header_dir, # type: Optional[str] - home, # type: Optional[str] - use_user_site, # type: bool - no_user_config, # type: bool - pycompile, # type: bool -): - # type: (...) 
-> List[str] - assert not (use_user_site and prefix) - assert not (use_user_site and root) - - args = make_setuptools_shim_args( - setup_py_path, - global_options=global_options, - no_user_config=no_user_config, - unbuffered_output=True, - ) - args += ["install", "--record", record_filename] - args += ["--single-version-externally-managed"] - - if root is not None: - args += ["--root", root] - if prefix is not None: - args += ["--prefix", prefix] - if home is not None: - args += ["--home", home] - if use_user_site: - args += ["--user", "--prefix="] - - if pycompile: - args += ["--compile"] - else: - args += ["--no-compile"] - - if header_dir: - args += ["--install-headers", header_dir] - - args += install_options - - return args diff --git a/env/Lib/site-packages/pip/_internal/utils/subprocess.py b/env/Lib/site-packages/pip/_internal/utils/subprocess.py index 2c8cf21231d7e956a1fad8219ceb34bb10ad76de..1e8ff50edfb8059799b334325e65eea9bb9b1ab3 100644 --- a/env/Lib/site-packages/pip/_internal/utils/subprocess.py +++ b/env/Lib/site-packages/pip/_internal/utils/subprocess.py @@ -2,25 +2,38 @@ import logging import os import shlex import subprocess -from typing import Any, Callable, Iterable, List, Mapping, Optional, Union +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Iterable, + List, + Mapping, + Optional, + Union, +) + +from pip._vendor.rich.markup import escape from pip._internal.cli.spinners import SpinnerInterface, open_spinner from pip._internal.exceptions import InstallationSubprocessError -from pip._internal.utils.logging import subprocess_logger +from pip._internal.utils.logging import VERBOSE, subprocess_logger from pip._internal.utils.misc import HiddenText -CommandArgs = List[Union[str, HiddenText]] - +if TYPE_CHECKING: + # Literal was introduced in Python 3.8. + # + # TODO: Remove `if TYPE_CHECKING` when dropping support for Python 3.7. + from typing import Literal -LOG_DIVIDER = "----------------------------------------" +CommandArgs = List[Union[str, HiddenText]] -def make_command(*args): - # type: (Union[str, HiddenText, CommandArgs]) -> CommandArgs +def make_command(*args: Union[str, HiddenText, CommandArgs]) -> CommandArgs: """ Create a CommandArgs object. """ - command_args = [] # type: CommandArgs + command_args: CommandArgs = [] for arg in args: # Check for list instead of CommandArgs since CommandArgs is # only known during type-checking. @@ -33,8 +46,7 @@ def make_command(*args): return command_args -def format_command_args(args): - # type: (Union[List[str], CommandArgs]) -> str +def format_command_args(args: Union[List[str], CommandArgs]) -> str: """ Format command arguments for display. """ @@ -49,64 +61,27 @@ def format_command_args(args): ) -def reveal_command_args(args): - # type: (Union[List[str], CommandArgs]) -> List[str] +def reveal_command_args(args: Union[List[str], CommandArgs]) -> List[str]: """ Return the arguments in their raw, unredacted form. """ return [arg.secret if isinstance(arg, HiddenText) else arg for arg in args] -def make_subprocess_output_error( - cmd_args, # type: Union[List[str], CommandArgs] - cwd, # type: Optional[str] - lines, # type: List[str] - exit_status, # type: int -): - # type: (...) -> str - """ - Create and return the error message to use to log a subprocess error - with command output. - - :param lines: A list of lines, each ending with a newline. - """ - command = format_command_args(cmd_args) - - # We know the joined output value ends in a newline. 
- output = "".join(lines) - msg = ( - # Use a unicode string to avoid "UnicodeEncodeError: 'ascii' - # codec can't encode character ..." in Python 2 when a format - # argument (e.g. `output`) has a non-ascii character. - "Command errored out with exit status {exit_status}:\n" - " command: {command_display}\n" - " cwd: {cwd_display}\n" - "Complete output ({line_count} lines):\n{output}{divider}" - ).format( - exit_status=exit_status, - command_display=command, - cwd_display=cwd, - line_count=len(lines), - output=output, - divider=LOG_DIVIDER, - ) - return msg - - def call_subprocess( - cmd, # type: Union[List[str], CommandArgs] - show_stdout=False, # type: bool - cwd=None, # type: Optional[str] - on_returncode="raise", # type: str - extra_ok_returncodes=None, # type: Optional[Iterable[int]] - command_desc=None, # type: Optional[str] - extra_environ=None, # type: Optional[Mapping[str, Any]] - unset_environ=None, # type: Optional[Iterable[str]] - spinner=None, # type: Optional[SpinnerInterface] - log_failed_cmd=True, # type: Optional[bool] - stdout_only=False, # type: Optional[bool] -): - # type: (...) -> str + cmd: Union[List[str], CommandArgs], + show_stdout: bool = False, + cwd: Optional[str] = None, + on_returncode: 'Literal["raise", "warn", "ignore"]' = "raise", + extra_ok_returncodes: Optional[Iterable[int]] = None, + extra_environ: Optional[Mapping[str, Any]] = None, + unset_environ: Optional[Iterable[str]] = None, + spinner: Optional[SpinnerInterface] = None, + log_failed_cmd: Optional[bool] = True, + stdout_only: Optional[bool] = False, + *, + command_desc: str, +) -> str: """ Args: show_stdout: if true, use INFO to log the subprocess's stderr and @@ -141,13 +116,13 @@ def call_subprocess( # replaced by INFO. if show_stdout: # Then log the subprocess output at INFO level. - log_subprocess = subprocess_logger.info + log_subprocess: Callable[..., None] = subprocess_logger.info used_level = logging.INFO else: - # Then log the subprocess output using DEBUG. This also ensures + # Then log the subprocess output using VERBOSE. This also ensures # it will be logged to the log file (aka user_log), if enabled. - log_subprocess = subprocess_logger.debug - used_level = logging.DEBUG + log_subprocess = subprocess_logger.verbose + used_level = VERBOSE # Whether the subprocess will be visible in the console. showing_subprocess = subprocess_logger.getEffectiveLevel() <= used_level @@ -156,9 +131,6 @@ def call_subprocess( # and we have a spinner. use_spinner = not showing_subprocess and spinner is not None - if command_desc is None: - command_desc = format_command_args(cmd) - log_subprocess("Running command %s", command_desc) env = os.environ.copy() if extra_environ: @@ -191,7 +163,7 @@ def call_subprocess( proc.stdin.close() # In this mode, stdout and stderr are in the same pipe. while True: - line = proc.stdout.readline() # type: str + line: str = proc.stdout.readline() if not line: break line = line.rstrip() @@ -231,17 +203,25 @@ def call_subprocess( spinner.finish("done") if proc_had_error: if on_returncode == "raise": - if not showing_subprocess and log_failed_cmd: - # Then the subprocess streams haven't been logged to the - # console yet. 
- msg = make_subprocess_output_error( - cmd_args=cmd, - cwd=cwd, - lines=all_output, - exit_status=proc.returncode, + error = InstallationSubprocessError( + command_description=command_desc, + exit_code=proc.returncode, + output_lines=all_output if not showing_subprocess else None, + ) + if log_failed_cmd: + subprocess_logger.error("[present-rich] %s", error) + subprocess_logger.verbose( + "[bold magenta]full command[/]: [blue]%s[/]", + escape(format_command_args(cmd)), + extra={"markup": True}, ) - subprocess_logger.error(msg) - raise InstallationSubprocessError(proc.returncode, command_desc) + subprocess_logger.verbose( + "[bold magenta]cwd[/]: %s", + escape(cwd or "[inherit]"), + extra={"markup": True}, + ) + + raise error elif on_returncode == "warn": subprocess_logger.warning( 'Command "%s" had error code %s in %s', @@ -256,23 +236,22 @@ def call_subprocess( return output -def runner_with_spinner_message(message): - # type: (str) -> Callable[..., None] +def runner_with_spinner_message(message: str) -> Callable[..., None]: """Provide a subprocess_runner that shows a spinner message. - Intended for use with for pep517's Pep517HookCaller. Thus, the runner has - an API that matches what's expected by Pep517HookCaller.subprocess_runner. + Intended for use with for BuildBackendHookCaller. Thus, the runner has + an API that matches what's expected by BuildBackendHookCaller.subprocess_runner. """ def runner( - cmd, # type: List[str] - cwd=None, # type: Optional[str] - extra_environ=None, # type: Optional[Mapping[str, Any]] - ): - # type: (...) -> None + cmd: List[str], + cwd: Optional[str] = None, + extra_environ: Optional[Mapping[str, Any]] = None, + ) -> None: with open_spinner(message) as spinner: call_subprocess( cmd, + command_desc=message, cwd=cwd, extra_environ=extra_environ, spinner=spinner, diff --git a/env/Lib/site-packages/pip/_internal/utils/temp_dir.py b/env/Lib/site-packages/pip/_internal/utils/temp_dir.py index 477cbe6b1aafd69f86096ecc8d633950df0fd5ac..8ee8a1cb18017880cd0bebd66bc2cec5702118c6 100644 --- a/env/Lib/site-packages/pip/_internal/utils/temp_dir.py +++ b/env/Lib/site-packages/pip/_internal/utils/temp_dir.py @@ -4,7 +4,7 @@ import logging import os.path import tempfile from contextlib import ExitStack, contextmanager -from typing import Any, Dict, Iterator, Optional, TypeVar, Union +from typing import Any, Dict, Generator, Optional, TypeVar, Union from pip._internal.utils.misc import enum, rmtree @@ -22,12 +22,11 @@ tempdir_kinds = enum( ) -_tempdir_manager = None # type: Optional[ExitStack] +_tempdir_manager: Optional[ExitStack] = None @contextmanager -def global_tempdir_manager(): - # type: () -> Iterator[None] +def global_tempdir_manager() -> Generator[None, None, None]: global _tempdir_manager with ExitStack() as stack: old_tempdir_manager, _tempdir_manager = _tempdir_manager, stack @@ -40,31 +39,27 @@ def global_tempdir_manager(): class TempDirectoryTypeRegistry: """Manages temp directory behavior""" - def __init__(self): - # type: () -> None - self._should_delete = {} # type: Dict[str, bool] + def __init__(self) -> None: + self._should_delete: Dict[str, bool] = {} - def set_delete(self, kind, value): - # type: (str, bool) -> None + def set_delete(self, kind: str, value: bool) -> None: """Indicate whether a TempDirectory of the given kind should be auto-deleted. 
""" self._should_delete[kind] = value - def get_delete(self, kind): - # type: (str) -> bool + def get_delete(self, kind: str) -> bool: """Get configured auto-delete flag for a given TempDirectory type, default True. """ return self._should_delete.get(kind, True) -_tempdir_registry = None # type: Optional[TempDirectoryTypeRegistry] +_tempdir_registry: Optional[TempDirectoryTypeRegistry] = None @contextmanager -def tempdir_registry(): - # type: () -> Iterator[TempDirectoryTypeRegistry] +def tempdir_registry() -> Generator[TempDirectoryTypeRegistry, None, None]: """Provides a scoped global tempdir registry that can be used to dictate whether directories should be deleted. """ @@ -107,10 +102,10 @@ class TempDirectory: def __init__( self, - path=None, # type: Optional[str] - delete=_default, # type: Union[bool, None, _Default] - kind="temp", # type: str - globally_managed=False, # type: bool + path: Optional[str] = None, + delete: Union[bool, None, _Default] = _default, + kind: str = "temp", + globally_managed: bool = False, ): super().__init__() @@ -139,21 +134,17 @@ class TempDirectory: _tempdir_manager.enter_context(self) @property - def path(self): - # type: () -> str + def path(self) -> str: assert not self._deleted, f"Attempted to access deleted path: {self._path}" return self._path - def __repr__(self): - # type: () -> str + def __repr__(self) -> str: return f"<{self.__class__.__name__} {self.path!r}>" - def __enter__(self): - # type: (_T) -> _T + def __enter__(self: _T) -> _T: return self - def __exit__(self, exc, value, tb): - # type: (Any, Any, Any) -> None + def __exit__(self, exc: Any, value: Any, tb: Any) -> None: if self.delete is not None: delete = self.delete elif _tempdir_registry: @@ -164,8 +155,7 @@ class TempDirectory: if delete: self.cleanup() - def _create(self, kind): - # type: (str) -> str + def _create(self, kind: str) -> str: """Create a temporary directory and store its path in self.path""" # We realpath here because some systems have their default tmpdir # symlinked to another directory. This tends to confuse build @@ -175,8 +165,7 @@ class TempDirectory: logger.debug("Created temporary directory: %s", path) return path - def cleanup(self): - # type: () -> None + def cleanup(self) -> None: """Remove the temporary directory created and reset state""" self._deleted = True if not os.path.exists(self._path): @@ -206,14 +195,12 @@ class AdjacentTempDirectory(TempDirectory): # with leading '-' and invalid metadata LEADING_CHARS = "-~.=%0123456789" - def __init__(self, original, delete=None): - # type: (str, Optional[bool]) -> None + def __init__(self, original: str, delete: Optional[bool] = None) -> None: self.original = original.rstrip("/\\") super().__init__(delete=delete) @classmethod - def _generate_names(cls, name): - # type: (str) -> Iterator[str] + def _generate_names(cls, name: str) -> Generator[str, None, None]: """Generates a series of temporary names. 
The algorithm replaces the leading characters in the name @@ -238,8 +225,7 @@ class AdjacentTempDirectory(TempDirectory): if new_name != name: yield new_name - def _create(self, kind): - # type: (str) -> str + def _create(self, kind: str) -> str: root, name = os.path.split(self.original) for candidate in self._generate_names(name): path = os.path.join(root, candidate) diff --git a/env/Lib/site-packages/pip/_internal/utils/unpacking.py b/env/Lib/site-packages/pip/_internal/utils/unpacking.py index 44ac475357d685b7871e9e2b02db319089242b41..78b5c13ced3d0a429b6d292e2b0b985d50909942 100644 --- a/env/Lib/site-packages/pip/_internal/utils/unpacking.py +++ b/env/Lib/site-packages/pip/_internal/utils/unpacking.py @@ -40,16 +40,14 @@ except ImportError: logger.debug("lzma module is not available") -def current_umask(): - # type: () -> int +def current_umask() -> int: """Get the current umask which involves having to set it temporarily.""" mask = os.umask(0) os.umask(mask) return mask -def split_leading_dir(path): - # type: (str) -> List[str] +def split_leading_dir(path: str) -> List[str]: path = path.lstrip("/").lstrip("\\") if "/" in path and ( ("\\" in path and path.find("/") < path.find("\\")) or "\\" not in path @@ -61,8 +59,7 @@ def split_leading_dir(path): return [path, ""] -def has_leading_dir(paths): - # type: (Iterable[str]) -> bool +def has_leading_dir(paths: Iterable[str]) -> bool: """Returns true if all the paths have the same leading path name (i.e., everything is in one subdirectory in an archive)""" common_prefix = None @@ -77,8 +74,7 @@ def has_leading_dir(paths): return True -def is_within_directory(directory, target): - # type: (str, str) -> bool +def is_within_directory(directory: str, target: str) -> bool: """ Return true if the absolute path of target is within the directory """ @@ -89,8 +85,7 @@ def is_within_directory(directory, target): return prefix == abs_directory -def set_extracted_file_to_default_mode_plus_executable(path): - # type: (str) -> None +def set_extracted_file_to_default_mode_plus_executable(path: str) -> None: """ Make file present at path have execute for user/group/world (chmod +x) is no-op on windows per python docs @@ -98,16 +93,14 @@ def set_extracted_file_to_default_mode_plus_executable(path): os.chmod(path, (0o777 & ~current_umask() | 0o111)) -def zip_item_is_executable(info): - # type: (ZipInfo) -> bool +def zip_item_is_executable(info: ZipInfo) -> bool: mode = info.external_attr >> 16 # if mode and regular file and any execute permissions for # user/group/world? return bool(mode and stat.S_ISREG(mode) and mode & 0o111) -def unzip_file(filename, location, flatten=True): - # type: (str, str, bool) -> None +def unzip_file(filename: str, location: str, flatten: bool = True) -> None: """ Unzip the file (with path `filename`) to the destination `location`. All files are written based on system defaults and umask (i.e. permissions are @@ -153,8 +146,7 @@ def unzip_file(filename, location, flatten=True): zipfp.close() -def untar_file(filename, location): - # type: (str, str) -> None +def untar_file(filename: str, location: str) -> None: """ Untar the file (with path `filename`) to the destination `location`. All files are written based on system defaults and umask (i.e. 
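is_within_directory() above is the guard this module applies before extracting archive members, so that entries like "../../etc/passwd" cannot escape the destination. The normalize-and-compare trick, standalone:

import os

def is_within_directory(directory: str, target: str) -> bool:
    abs_directory = os.path.abspath(directory)
    abs_target = os.path.abspath(target)
    return os.path.commonprefix([abs_directory, abs_target]) == abs_directory

print(is_within_directory("/dest", "/dest/pkg/mod.py"))     # True
print(is_within_directory("/dest", "/dest/../etc/passwd"))  # False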
permissions @@ -178,7 +170,7 @@ def untar_file(filename, location): filename, ) mode = "r:*" - tar = tarfile.open(filename, mode) + tar = tarfile.open(filename, mode, encoding="utf-8") try: leading = has_leading_dir([member.name for member in tar.getmembers()]) for member in tar.getmembers(): @@ -196,8 +188,7 @@ def untar_file(filename, location): ensure_dir(path) elif member.issym(): try: - # https://github.com/python/typeshed/issues/2673 - tar._extract_member(member, path) # type: ignore + tar._extract_member(member, path) except Exception as exc: # Some corrupt tar files seem to produce this # (specifically bad symlinks) @@ -236,11 +227,10 @@ def untar_file(filename, location): def unpack_file( - filename, # type: str - location, # type: str - content_type=None, # type: Optional[str] -): - # type: (...) -> None + filename: str, + location: str, + content_type: Optional[str] = None, +) -> None: filename = os.path.realpath(filename) if ( content_type == "application/zip" diff --git a/env/Lib/site-packages/pip/_internal/utils/urls.py b/env/Lib/site-packages/pip/_internal/utils/urls.py index 50a04d861d442458cfa55dbd7bcae4f586dcb6e5..6ba2e04f350792e2c0021cf7ba7f40b25dc6cd51 100644 --- a/env/Lib/site-packages/pip/_internal/utils/urls.py +++ b/env/Lib/site-packages/pip/_internal/utils/urls.py @@ -1,19 +1,19 @@ import os -import sys +import string import urllib.parse import urllib.request from typing import Optional +from .compat import WINDOWS -def get_url_scheme(url): - # type: (str) -> Optional[str] + +def get_url_scheme(url: str) -> Optional[str]: if ":" not in url: return None return url.split(":", 1)[0].lower() -def path_to_url(path): - # type: (str) -> str +def path_to_url(path: str) -> str: """ Convert a path to a file: URL. The path will be made absolute and have quoted path parts. @@ -23,8 +23,7 @@ def path_to_url(path): return url -def url_to_path(url): - # type: (str) -> str +def url_to_path(url: str) -> str: """ Convert a file: URL to a path. """ @@ -37,7 +36,7 @@ def url_to_path(url): if not netloc or netloc == "localhost": # According to RFC 8089, same as empty authority. netloc = "" - elif sys.platform == "win32": + elif WINDOWS: # If we have a UNC path, prepend UNC share notation. netloc = "\\\\" + netloc else: @@ -46,4 +45,18 @@ def url_to_path(url): ) path = urllib.request.url2pathname(netloc + path) + + # On Windows, urlsplit parses the path as something like "/C:/Users/foo". + # This creates issues for path-related functions like io.open(), so we try + # to detect and strip the leading slash. + if ( + WINDOWS + and not netloc # Not UNC. + and len(path) >= 3 + and path[0] == "/" # Leading slash to strip. + and path[1] in string.ascii_letters # Drive letter. + and path[2:4] in (":", ":/") # Colon + end of string, or colon + absolute path. + ): + path = path[1:] + return path diff --git a/env/Lib/site-packages/pip/_internal/utils/virtualenv.py b/env/Lib/site-packages/pip/_internal/utils/virtualenv.py index 51cacb55ca1906c731e41cab71bd1fe23b4e4ccd..882e36f5c1de19a8200000c216cf80119b37c96d 100644 --- a/env/Lib/site-packages/pip/_internal/utils/virtualenv.py +++ b/env/Lib/site-packages/pip/_internal/utils/virtualenv.py @@ -11,8 +11,7 @@ _INCLUDE_SYSTEM_SITE_PACKAGES_REGEX = re.compile( ) -def _running_under_venv(): - # type: () -> bool +def _running_under_venv() -> bool: """Checks if sys.base_prefix and sys.prefix match. This handles PEP 405 compliant virtual environments. 
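The venv check above and the legacy check that follows it reduce to a pair of attribute tests: PEP 405 venvs make sys.prefix diverge from sys.base_prefix, while legacy virtualenv (<20) plants sys.real_prefix. Condensed:

import sys

def running_under_virtualenv() -> bool:
    if sys.prefix != getattr(sys, "base_prefix", sys.prefix):
        return True                      # PEP 405 venv
    return hasattr(sys, "real_prefix")   # legacy virtualenv

print(running_under_virtualenv())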
@@ -20,8 +19,7 @@ def _running_under_venv(): return sys.prefix != getattr(sys, "base_prefix", sys.prefix) -def _running_under_regular_virtualenv(): - # type: () -> bool +def _running_under_legacy_virtualenv() -> bool: """Checks if sys.real_prefix is set. This handles virtual environments created with pypa's virtualenv. @@ -30,14 +28,12 @@ def _running_under_regular_virtualenv(): return hasattr(sys, "real_prefix") -def running_under_virtualenv(): - # type: () -> bool - """Return True if we're running inside a virtualenv, False otherwise.""" - return _running_under_venv() or _running_under_regular_virtualenv() +def running_under_virtualenv() -> bool: + """True if we're running inside a virtual environment, False otherwise.""" + return _running_under_venv() or _running_under_legacy_virtualenv() -def _get_pyvenv_cfg_lines(): - # type: () -> Optional[List[str]] +def _get_pyvenv_cfg_lines() -> Optional[List[str]]: """Reads {sys.prefix}/pyvenv.cfg and returns its contents as list of lines Returns None, if it could not read/access the file. @@ -52,8 +48,7 @@ def _get_pyvenv_cfg_lines(): return None -def _no_global_under_venv(): - # type: () -> bool +def _no_global_under_venv() -> bool: """Check `{sys.prefix}/pyvenv.cfg` for system site-packages inclusion PEP 405 specifies that when system site-packages are not supposed to be @@ -82,8 +77,7 @@ def _no_global_under_venv(): return False -def _no_global_under_regular_virtualenv(): - # type: () -> bool +def _no_global_under_legacy_virtualenv() -> bool: """Check if "no-global-site-packages.txt" exists beside site.py This mirrors logic in pypa/virtualenv for determining whether system @@ -97,15 +91,14 @@ def _no_global_under_regular_virtualenv(): return os.path.exists(no_global_site_packages_file) -def virtualenv_no_global(): - # type: () -> bool +def virtualenv_no_global() -> bool: """Returns a boolean, whether running in venv with no system site-packages.""" # PEP 405 compliance needs to be checked first since virtualenv >=20 would # return True for both checks, but is only able to use the PEP 405 config. if _running_under_venv(): return _no_global_under_venv() - if _running_under_regular_virtualenv(): - return _no_global_under_regular_virtualenv() + if _running_under_legacy_virtualenv(): + return _no_global_under_legacy_virtualenv() return False diff --git a/env/Lib/site-packages/pip/_internal/utils/wheel.py b/env/Lib/site-packages/pip/_internal/utils/wheel.py index 42f080845cfa21d0c7b31d788badb800f0b81525..e5e3f34ed81453ce759c6ade8b2def733e9063e2 100644 --- a/env/Lib/site-packages/pip/_internal/utils/wheel.py +++ b/env/Lib/site-packages/pip/_internal/utils/wheel.py @@ -4,14 +4,12 @@ import logging from email.message import Message from email.parser import Parser -from typing import Dict, Tuple +from typing import Tuple from zipfile import BadZipFile, ZipFile from pip._vendor.packaging.utils import canonicalize_name -from pip._vendor.pkg_resources import DistInfoDistribution, Distribution from pip._internal.exceptions import UnsupportedWheel -from pip._internal.utils.pkg_resources import DictMetadata VERSION_COMPATIBLE = (1, 0) @@ -19,53 +17,7 @@ VERSION_COMPATIBLE = (1, 0) logger = logging.getLogger(__name__) -class WheelMetadata(DictMetadata): - """Metadata provider that maps metadata decoding exceptions to our - internal exception type. 
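The pyvenv.cfg inspection described above (pip matches the line with the module-level _INCLUDE_SYSTEM_SITE_PACKAGES_REGEX) boils down to one key lookup; a regex-free sketch with the same fallback of assuming global access when the file is unreadable:

import os
import sys

def no_global_under_venv() -> bool:
    cfg = os.path.join(sys.prefix, "pyvenv.cfg")
    try:
        with open(cfg, encoding="utf-8") as f:
            lines = f.read().splitlines()
    except OSError:
        return False  # cannot read the config: assume system site-packages
    for line in lines:
        key, sep, value = line.partition("=")
        if sep and key.strip().lower() == "include-system-site-packages":
            return value.strip().lower() == "false"
    return False

print(no_global_under_venv())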
- """ - - def __init__(self, metadata, wheel_name): - # type: (Dict[str, bytes], str) -> None - super().__init__(metadata) - self._wheel_name = wheel_name - - def get_metadata(self, name): - # type: (str) -> str - try: - return super().get_metadata(name) - except UnicodeDecodeError as e: - # Augment the default error with the origin of the file. - raise UnsupportedWheel( - f"Error decoding metadata for {self._wheel_name}: {e}" - ) - - -def pkg_resources_distribution_for_wheel(wheel_zip, name, location): - # type: (ZipFile, str, str) -> Distribution - """Get a pkg_resources distribution given a wheel. - - :raises UnsupportedWheel: on any errors - """ - info_dir, _ = parse_wheel(wheel_zip, name) - - metadata_files = [p for p in wheel_zip.namelist() if p.startswith(f"{info_dir}/")] - - metadata_text = {} # type: Dict[str, bytes] - for path in metadata_files: - _, metadata_name = path.split("/", 1) - - try: - metadata_text[metadata_name] = read_wheel_metadata_file(wheel_zip, path) - except UnsupportedWheel as e: - raise UnsupportedWheel("{} has an invalid wheel, {}".format(name, str(e))) - - metadata = WheelMetadata(metadata_text, location) - - return DistInfoDistribution(location=location, metadata=metadata, project_name=name) - - -def parse_wheel(wheel_zip, name): - # type: (ZipFile, str) -> Tuple[str, Message] +def parse_wheel(wheel_zip: ZipFile, name: str) -> Tuple[str, Message]: """Extract information from the provided wheel, ensuring it meets basic standards. @@ -83,8 +35,7 @@ def parse_wheel(wheel_zip, name): return info_dir, metadata -def wheel_dist_info_dir(source, name): - # type: (ZipFile, str) -> str +def wheel_dist_info_dir(source: ZipFile, name: str) -> str: """Returns the name of the contained .dist-info directory. Raises AssertionError or UnsupportedWheel if not found, >1 found, or @@ -117,8 +68,7 @@ def wheel_dist_info_dir(source, name): return info_dir -def read_wheel_metadata_file(source, path): - # type: (ZipFile, str) -> bytes +def read_wheel_metadata_file(source: ZipFile, path: str) -> bytes: try: return source.read(path) # BadZipFile for general corruption, KeyError for missing entry, @@ -127,8 +77,7 @@ def read_wheel_metadata_file(source, path): raise UnsupportedWheel(f"could not read {path!r} file: {e!r}") -def wheel_metadata(source, dist_info_dir): - # type: (ZipFile, str) -> Message +def wheel_metadata(source: ZipFile, dist_info_dir: str) -> Message: """Return the WHEEL metadata of an extracted wheel, if possible. Otherwise, raise UnsupportedWheel. """ @@ -147,8 +96,7 @@ def wheel_metadata(source, dist_info_dir): return Parser().parsestr(wheel_text) -def wheel_version(wheel_data): - # type: (Message) -> Tuple[int, ...] +def wheel_version(wheel_data: Message) -> Tuple[int, ...]: """Given WHEEL metadata, return the parsed Wheel-Version. Otherwise, raise UnsupportedWheel. """ @@ -164,8 +112,7 @@ def wheel_version(wheel_data): raise UnsupportedWheel(f"invalid Wheel-Version: {version!r}") -def check_compatibility(version, name): - # type: (Tuple[int, ...], str) -> None +def check_compatibility(version: Tuple[int, ...], name: str) -> None: """Raises errors or warns if called with an incompatible Wheel-Version. 
pip should refuse to install a Wheel-Version that's a major series diff --git a/env/Lib/site-packages/pip/_internal/vcs/__init__.py b/env/Lib/site-packages/pip/_internal/vcs/__init__.py index 30025d632a9d9297a90b88d20ac127cce8aa5fac..b6beddbe6d24d2949dc89ed07abfebd59d8b63b9 100644 --- a/env/Lib/site-packages/pip/_internal/vcs/__init__.py +++ b/env/Lib/site-packages/pip/_internal/vcs/__init__.py @@ -8,6 +8,7 @@ import pip._internal.vcs.mercurial import pip._internal.vcs.subversion # noqa: F401 from pip._internal.vcs.versioncontrol import ( # noqa: F401 RemoteNotFoundError, + RemoteNotValidError, is_url, make_vcs_requirement_url, vcs, diff --git a/env/Lib/site-packages/pip/_internal/vcs/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/vcs/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index e549a9ac9dbd8a2d84af83182525b1f79d174be6..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/vcs/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/vcs/__pycache__/bazaar.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/vcs/__pycache__/bazaar.cpython-39.pyc deleted file mode 100644 index 2c9e5c9251b7c6adc92c946de63f4868b712065d..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/vcs/__pycache__/bazaar.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/vcs/__pycache__/git.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/vcs/__pycache__/git.cpython-39.pyc deleted file mode 100644 index 031887dbba1a53e17922590fbc35ec535f7a1f8a..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/vcs/__pycache__/git.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/vcs/__pycache__/mercurial.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/vcs/__pycache__/mercurial.cpython-39.pyc deleted file mode 100644 index 1c526459dddd98473d542f480fa2345d22d255ec..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/vcs/__pycache__/mercurial.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/vcs/__pycache__/subversion.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/vcs/__pycache__/subversion.cpython-39.pyc deleted file mode 100644 index e3997e1e3aab0aacb1ea4cc6fca98f5ed0143ec0..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/vcs/__pycache__/subversion.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/vcs/__pycache__/versioncontrol.cpython-39.pyc b/env/Lib/site-packages/pip/_internal/vcs/__pycache__/versioncontrol.cpython-39.pyc deleted file mode 100644 index 9db88480547ac3442228bbab60c5bd78f619fc13..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_internal/vcs/__pycache__/versioncontrol.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_internal/vcs/bazaar.py b/env/Lib/site-packages/pip/_internal/vcs/bazaar.py index 42b68773b146c4c94432c908addebd7a54e14294..20a17ed09272a09a5b3c0bfbd0e6c43f78db4c1e 100644 --- a/env/Lib/site-packages/pip/_internal/vcs/bazaar.py +++ b/env/Lib/site-packages/pip/_internal/vcs/bazaar.py @@ -16,61 +16,76 @@ logger = logging.getLogger(__name__) class Bazaar(VersionControl): - name = 'bzr' - dirname = '.bzr' - repo_name = 'branch' + name = "bzr" + dirname = ".bzr" + repo_name = "branch" schemes = ( - 'bzr+http', 
'bzr+https', 'bzr+ssh', 'bzr+sftp', 'bzr+ftp', - 'bzr+lp', 'bzr+file' + "bzr+http", + "bzr+https", + "bzr+ssh", + "bzr+sftp", + "bzr+ftp", + "bzr+lp", + "bzr+file", ) @staticmethod - def get_base_rev_args(rev): - # type: (str) -> List[str] - return ['-r', rev] + def get_base_rev_args(rev: str) -> List[str]: + return ["-r", rev] - def fetch_new(self, dest, url, rev_options): - # type: (str, HiddenText, RevOptions) -> None + def fetch_new( + self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int + ) -> None: rev_display = rev_options.to_display() logger.info( - 'Checking out %s%s to %s', + "Checking out %s%s to %s", url, rev_display, display_path(dest), ) - cmd_args = ( - make_command('branch', '-q', rev_options.to_args(), url, dest) + if verbosity <= 0: + flag = "--quiet" + elif verbosity == 1: + flag = "" + else: + flag = f"-{'v'*verbosity}" + cmd_args = make_command( + "checkout", "--lightweight", flag, rev_options.to_args(), url, dest ) self.run_command(cmd_args) - def switch(self, dest, url, rev_options): - # type: (str, HiddenText, RevOptions) -> None - self.run_command(make_command('switch', url), cwd=dest) + def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None: + self.run_command(make_command("switch", url), cwd=dest) - def update(self, dest, url, rev_options): - # type: (str, HiddenText, RevOptions) -> None - cmd_args = make_command('pull', '-q', rev_options.to_args()) + def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None: + output = self.run_command( + make_command("info"), show_stdout=False, stdout_only=True, cwd=dest + ) + if output.startswith("Standalone "): + # Older versions of pip used to create standalone branches. + # Convert the standalone branch to a checkout by calling "bzr bind". 
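# --- illustrative aside (not part of the patch) ---
# The new Bazaar fetch_new above maps pip's verbosity level to a bzr flag.
# A minimal standalone sketch of that mapping:
def bzr_verbosity_flag(verbosity: int) -> str:
    if verbosity <= 0:
        return "--quiet"
    if verbosity == 1:
        return ""  # bzr's default verbosity
    return f"-{'v' * verbosity}"  # e.g. verbosity=3 -> "-vvv"

assert bzr_verbosity_flag(0) == "--quiet"
assert bzr_verbosity_flag(3) == "-vvv"
# --- end aside ---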
+ cmd_args = make_command("bind", "-q", url) + self.run_command(cmd_args, cwd=dest) + + cmd_args = make_command("update", "-q", rev_options.to_args()) self.run_command(cmd_args, cwd=dest) @classmethod - def get_url_rev_and_auth(cls, url): - # type: (str) -> Tuple[str, Optional[str], AuthInfo] - # hotfix the URL scheme after removing bzr+ from bzr+ssh:// readd it + def get_url_rev_and_auth(cls, url: str) -> Tuple[str, Optional[str], AuthInfo]: + # hotfix the URL scheme after removing bzr+ from bzr+ssh:// re-add it url, rev, user_pass = super().get_url_rev_and_auth(url) - if url.startswith('ssh://'): - url = 'bzr+' + url + if url.startswith("ssh://"): + url = "bzr+" + url return url, rev, user_pass @classmethod - def get_remote_url(cls, location): - # type: (str) -> str + def get_remote_url(cls, location: str) -> str: urls = cls.run_command( - ['info'], show_stdout=False, stdout_only=True, cwd=location + ["info"], show_stdout=False, stdout_only=True, cwd=location ) for line in urls.splitlines(): line = line.strip() - for x in ('checkout of branch: ', - 'parent branch: '): + for x in ("checkout of branch: ", "parent branch: "): if line.startswith(x): repo = line.split(x)[1] if cls._is_local_repository(repo): @@ -79,16 +94,17 @@ class Bazaar(VersionControl): raise RemoteNotFoundError @classmethod - def get_revision(cls, location): - # type: (str) -> str + def get_revision(cls, location: str) -> str: revision = cls.run_command( - ['revno'], show_stdout=False, stdout_only=True, cwd=location, + ["revno"], + show_stdout=False, + stdout_only=True, + cwd=location, ) return revision.splitlines()[-1] @classmethod - def is_commit_id_equal(cls, dest, name): - # type: (str, Optional[str]) -> bool + def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool: """Always assume the versions don't match""" return False diff --git a/env/Lib/site-packages/pip/_internal/vcs/git.py b/env/Lib/site-packages/pip/_internal/vcs/git.py index b7c1b9fe7b5273eba46a1caa1b0c6f8f60e9aac5..8d1d499376744954308bdf96f80e5b5a39a24195 100644 --- a/env/Lib/site-packages/pip/_internal/vcs/git.py +++ b/env/Lib/site-packages/pip/_internal/vcs/git.py @@ -1,22 +1,21 @@ import logging import os.path +import pathlib import re import urllib.parse import urllib.request from typing import List, Optional, Tuple -from pip._vendor.packaging.version import _BaseVersion -from pip._vendor.packaging.version import parse as parse_version - from pip._internal.exceptions import BadCommand, InstallationError from pip._internal.utils.misc import HiddenText, display_path, hide_url from pip._internal.utils.subprocess import make_command from pip._internal.vcs.versioncontrol import ( AuthInfo, RemoteNotFoundError, + RemoteNotValidError, RevOptions, VersionControl, - find_path_to_setup_from_repo_root, + find_path_to_project_root_from_repo_root, vcs, ) @@ -27,33 +26,57 @@ urlunsplit = urllib.parse.urlunsplit logger = logging.getLogger(__name__) -HASH_REGEX = re.compile('^[a-fA-F0-9]{40}$') +GIT_VERSION_REGEX = re.compile( + r"^git version " # Prefix. + r"(\d+)" # Major. + r"\.(\d+)" # Dot, minor. + r"(?:\.(\d+))?" # Optional dot, patch. + r".*$" # Suffix, including any pre- and post-release segments we don't care about. +) + +HASH_REGEX = re.compile("^[a-fA-F0-9]{40}$") + +# SCP (Secure copy protocol) shorthand. e.g. 'git@example.com:foo/bar.git' +SCP_REGEX = re.compile( + r"""^ + # Optional user, e.g. 'git@' + (\w+@)? + # Server, e.g. 'github.com'. + ([^/:]+): + # The server-side path. e.g. 'user/project.git'. 
Must start with an + # alphanumeric character so as not to be confusable with a Windows paths + # like 'C:/foo/bar' or 'C:\foo\bar'. + (\w[^:]*) + $""", + re.VERBOSE, +) -def looks_like_hash(sha): - # type: (str) -> bool +def looks_like_hash(sha: str) -> bool: return bool(HASH_REGEX.match(sha)) class Git(VersionControl): - name = 'git' - dirname = '.git' - repo_name = 'clone' + name = "git" + dirname = ".git" + repo_name = "clone" schemes = ( - 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file', + "git+http", + "git+https", + "git+ssh", + "git+git", + "git+file", ) # Prevent the user's environment variables from interfering with pip: # https://github.com/pypa/pip/issues/1130 - unset_environ = ('GIT_DIR', 'GIT_WORK_TREE') - default_arg_rev = 'HEAD' + unset_environ = ("GIT_DIR", "GIT_WORK_TREE") + default_arg_rev = "HEAD" @staticmethod - def get_base_rev_args(rev): - # type: (str) -> List[str] + def get_base_rev_args(rev: str) -> List[str]: return [rev] - def is_immutable_rev_checkout(self, url, dest): - # type: (str, str) -> bool + def is_immutable_rev_checkout(self, url: str, dest: str) -> bool: _, rev_options = self.get_url_rev_options(hide_url(url)) if not rev_options.rev: return False @@ -64,30 +87,24 @@ class Git(VersionControl): # return False in the rare case rev is both a commit hash # and a tag or a branch; we don't want to cache in that case # because that branch/tag could point to something else in the future - is_tag_or_branch = bool( - self.get_revision_sha(dest, rev_options.rev)[0] - ) + is_tag_or_branch = bool(self.get_revision_sha(dest, rev_options.rev)[0]) return not is_tag_or_branch - def get_git_version(self): - # type: () -> _BaseVersion - VERSION_PFX = 'git version ' + def get_git_version(self) -> Tuple[int, ...]: version = self.run_command( - ['version'], show_stdout=False, stdout_only=True + ["version"], + command_desc="git version", + show_stdout=False, + stdout_only=True, ) - if version.startswith(VERSION_PFX): - version = version[len(VERSION_PFX):].split()[0] - else: - version = '' - # get first 3 positions of the git version because - # on windows it is x.y.z.windows.t, and this parses as - # LegacyVersion which always smaller than a Version. - version = '.'.join(version.split('.')[:3]) - return parse_version(version) + match = GIT_VERSION_REGEX.match(version) + if not match: + logger.warning("Can't parse git version: %s", version) + return () + return tuple(int(c) for c in match.groups()) @classmethod - def get_current_branch(cls, location): - # type: (str) -> Optional[str] + def get_current_branch(cls, location: str) -> Optional[str]: """ Return the current branch, or None if HEAD isn't at a branch (e.g. detached HEAD). @@ -96,24 +113,23 @@ class Git(VersionControl): # HEAD rather than a symbolic ref. In addition, the -q causes the # command to exit with status code 1 instead of 128 in this case # and to suppress the message to stderr. 
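# --- illustrative aside (not part of the patch) ---
# What the SCP_REGEX introduced above accepts and rejects, and how a match
# expands into the ssh:// form used by _git_remote_to_pip_url (sample inputs):
import re

SCP_REGEX = re.compile(
    r"""^
    (\w+@)?     # optional user, e.g. 'git@'
    ([^/:]+):   # server, e.g. 'github.com'
    (\w[^:]*)   # server-side path; must start with an alphanumeric character
    $""",
    re.VERBOSE,
)

match = SCP_REGEX.match("git@example.com:foo/bar.git")
assert match is not None
assert match.expand(r"ssh://\1\2/\3") == "ssh://git@example.com/foo/bar.git"
assert SCP_REGEX.match("C:/foo/bar") is None  # Windows path, not SCP shorthand
# --- end aside ---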
- args = ['symbolic-ref', '-q', 'HEAD'] + args = ["symbolic-ref", "-q", "HEAD"] output = cls.run_command( args, - extra_ok_returncodes=(1, ), + extra_ok_returncodes=(1,), show_stdout=False, stdout_only=True, cwd=location, ) ref = output.strip() - if ref.startswith('refs/heads/'): - return ref[len('refs/heads/'):] + if ref.startswith("refs/heads/"): + return ref[len("refs/heads/") :] return None @classmethod - def get_revision_sha(cls, dest, rev): - # type: (str, str) -> Tuple[Optional[str], bool] + def get_revision_sha(cls, dest: str, rev: str) -> Tuple[Optional[str], bool]: """ Return (sha_or_none, is_branch), where sha_or_none is a commit hash if the revision names a remote branch or tag, otherwise None. @@ -124,11 +140,11 @@ class Git(VersionControl): """ # Pass rev to pre-filter the list. output = cls.run_command( - ['show-ref', rev], + ["show-ref", rev], cwd=dest, show_stdout=False, stdout_only=True, - on_returncode='ignore', + on_returncode="ignore", ) refs = {} # NOTE: We do not use splitlines here since that would split on other @@ -143,12 +159,12 @@ class Git(VersionControl): except ValueError: # Include the offending line to simplify troubleshooting if # this error ever occurs. - raise ValueError(f'unexpected show-ref line: {line!r}') + raise ValueError(f"unexpected show-ref line: {line!r}") refs[ref_name] = ref_sha - branch_ref = f'refs/remotes/origin/{rev}' - tag_ref = f'refs/tags/{rev}' + branch_ref = f"refs/remotes/origin/{rev}" + tag_ref = f"refs/tags/{rev}" sha = refs.get(branch_ref) if sha is not None: @@ -159,8 +175,7 @@ class Git(VersionControl): return (sha, False) @classmethod - def _should_fetch(cls, dest, rev): - # type: (str, str) -> bool + def _should_fetch(cls, dest: str, rev: str) -> bool: """ Return true if rev is a ref or is a commit that we don't have locally. @@ -183,8 +198,9 @@ class Git(VersionControl): return True @classmethod - def resolve_revision(cls, dest, url, rev_options): - # type: (str, HiddenText, RevOptions) -> RevOptions + def resolve_revision( + cls, dest: str, url: HiddenText, rev_options: RevOptions + ) -> RevOptions: """ Resolve a revision to a new RevOptions object with the SHA1 of the branch, tag, or ref if found. @@ -218,18 +234,17 @@ class Git(VersionControl): # fetch the requested revision cls.run_command( - make_command('fetch', '-q', url, rev_options.to_args()), + make_command("fetch", "-q", url, rev_options.to_args()), cwd=dest, ) # Change the revision to the SHA of the ref we fetched - sha = cls.get_revision(dest, rev='FETCH_HEAD') + sha = cls.get_revision(dest, rev="FETCH_HEAD") rev_options = rev_options.make_new(sha) return rev_options @classmethod - def is_commit_id_equal(cls, dest, name): - # type: (str, Optional[str]) -> bool + def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool: """ Return whether the current commit hash equals the given name. @@ -243,65 +258,95 @@ class Git(VersionControl): return cls.get_revision(dest) == name - def fetch_new(self, dest, url, rev_options): - # type: (str, HiddenText, RevOptions) -> None + def fetch_new( + self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int + ) -> None: rev_display = rev_options.to_display() - logger.info('Cloning %s%s to %s', url, rev_display, display_path(dest)) - self.run_command(make_command('clone', '-q', url, dest)) + logger.info("Cloning %s%s to %s", url, rev_display, display_path(dest)) + if verbosity <= 0: + flags: Tuple[str, ...] 
= ("--quiet",) + elif verbosity == 1: + flags = () + else: + flags = ("--verbose", "--progress") + if self.get_git_version() >= (2, 17): + # Git added support for partial clone in 2.17 + # https://git-scm.com/docs/partial-clone + # Speeds up cloning by functioning without a complete copy of repository + self.run_command( + make_command( + "clone", + "--filter=blob:none", + *flags, + url, + dest, + ) + ) + else: + self.run_command(make_command("clone", *flags, url, dest)) if rev_options.rev: # Then a specific revision was requested. rev_options = self.resolve_revision(dest, url, rev_options) - branch_name = getattr(rev_options, 'branch_name', None) + branch_name = getattr(rev_options, "branch_name", None) + logger.debug("Rev options %s, branch_name %s", rev_options, branch_name) if branch_name is None: # Only do a checkout if the current commit id doesn't match # the requested revision. if not self.is_commit_id_equal(dest, rev_options.rev): cmd_args = make_command( - 'checkout', '-q', rev_options.to_args(), + "checkout", + "-q", + rev_options.to_args(), ) self.run_command(cmd_args, cwd=dest) elif self.get_current_branch(dest) != branch_name: # Then a specific branch was requested, and that branch # is not yet checked out. - track_branch = f'origin/{branch_name}' + track_branch = f"origin/{branch_name}" cmd_args = [ - 'checkout', '-b', branch_name, '--track', track_branch, + "checkout", + "-b", + branch_name, + "--track", + track_branch, ] self.run_command(cmd_args, cwd=dest) + else: + sha = self.get_revision(dest) + rev_options = rev_options.make_new(sha) + + logger.info("Resolved %s to commit %s", url, rev_options.rev) #: repo may contain submodules self.update_submodules(dest) - def switch(self, dest, url, rev_options): - # type: (str, HiddenText, RevOptions) -> None + def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None: self.run_command( - make_command('config', 'remote.origin.url', url), + make_command("config", "remote.origin.url", url), cwd=dest, ) - cmd_args = make_command('checkout', '-q', rev_options.to_args()) + cmd_args = make_command("checkout", "-q", rev_options.to_args()) self.run_command(cmd_args, cwd=dest) self.update_submodules(dest) - def update(self, dest, url, rev_options): - # type: (str, HiddenText, RevOptions) -> None + def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None: # First fetch changes from the default remote - if self.get_git_version() >= parse_version('1.9.0'): + if self.get_git_version() >= (1, 9): # fetch tags in addition to everything else - self.run_command(['fetch', '-q', '--tags'], cwd=dest) + self.run_command(["fetch", "-q", "--tags"], cwd=dest) else: - self.run_command(['fetch', '-q'], cwd=dest) + self.run_command(["fetch", "-q"], cwd=dest) # Then reset to wanted revision (maybe even origin/master) rev_options = self.resolve_revision(dest, url, rev_options) - cmd_args = make_command('reset', '--hard', '-q', rev_options.to_args()) + cmd_args = make_command("reset", "--hard", "-q", rev_options.to_args()) self.run_command(cmd_args, cwd=dest) #: update submodules self.update_submodules(dest) @classmethod - def get_remote_url(cls, location): - # type: (str) -> str + def get_remote_url(cls, location: str) -> str: """ Return URL of the first remote encountered. @@ -311,8 +356,8 @@ class Git(VersionControl): # We need to pass 1 for extra_ok_returncodes since the command # exits with return code 1 if there are no matching lines. 
stdout = cls.run_command( - ['config', '--get-regexp', r'remote\..*\.url'], - extra_ok_returncodes=(1, ), + ["config", "--get-regexp", r"remote\..*\.url"], + extra_ok_returncodes=(1,), show_stdout=False, stdout_only=True, cwd=location, @@ -324,21 +369,51 @@ class Git(VersionControl): raise RemoteNotFoundError for remote in remotes: - if remote.startswith('remote.origin.url '): + if remote.startswith("remote.origin.url "): found_remote = remote break - url = found_remote.split(' ')[1] - return url.strip() + url = found_remote.split(" ")[1] + return cls._git_remote_to_pip_url(url.strip()) + + @staticmethod + def _git_remote_to_pip_url(url: str) -> str: + """ + Convert a remote url from what git uses to what pip accepts. + + There are 3 legal forms **url** may take: + + 1. A fully qualified url: ssh://git@example.com/foo/bar.git + 2. A local project.git folder: /path/to/bare/repository.git + 3. SCP shorthand for form 1: git@example.com:foo/bar.git + + Form 1 is output as-is. Form 2 must be converted to URI and form 3 must + be converted to form 1. + + See the corresponding test test_git_remote_url_to_pip() for examples of + sample inputs/outputs. + """ + if re.match(r"\w+://", url): + # This is already valid. Pass it though as-is. + return url + if os.path.exists(url): + # A local bare remote (git clone --mirror). + # Needs a file:// prefix. + return pathlib.PurePath(url).as_uri() + scp_match = SCP_REGEX.match(url) + if scp_match: + # Add an ssh:// prefix and replace the ':' with a '/'. + return scp_match.expand(r"ssh://\1\2/\3") + # Otherwise, bail out. + raise RemoteNotValidError(url) @classmethod - def has_commit(cls, location, rev): - # type: (str, str) -> bool + def has_commit(cls, location: str, rev: str) -> bool: """ Check if rev is a commit that is available in the local repository. """ try: cls.run_command( - ['rev-parse', '-q', '--verify', "sha^" + rev], + ["rev-parse", "-q", "--verify", "sha^" + rev], cwd=location, log_failed_cmd=False, ) @@ -348,12 +423,11 @@ class Git(VersionControl): return True @classmethod - def get_revision(cls, location, rev=None): - # type: (str, Optional[str]) -> str + def get_revision(cls, location: str, rev: Optional[str] = None) -> str: if rev is None: - rev = 'HEAD' + rev = "HEAD" current_rev = cls.run_command( - ['rev-parse', rev], + ["rev-parse", rev], show_stdout=False, stdout_only=True, cwd=location, @@ -361,27 +435,25 @@ class Git(VersionControl): return current_rev.strip() @classmethod - def get_subdirectory(cls, location): - # type: (str) -> Optional[str] + def get_subdirectory(cls, location: str) -> Optional[str]: """ - Return the path to setup.py, relative to the repo root. - Return None if setup.py is in the repo root. + Return the path to Python project root, relative to the repo root. + Return None if the project root is in the repo root. 
""" # find the repo root git_dir = cls.run_command( - ['rev-parse', '--git-dir'], + ["rev-parse", "--git-dir"], show_stdout=False, stdout_only=True, cwd=location, ).strip() if not os.path.isabs(git_dir): git_dir = os.path.join(location, git_dir) - repo_root = os.path.abspath(os.path.join(git_dir, '..')) - return find_path_to_setup_from_repo_root(location, repo_root) + repo_root = os.path.abspath(os.path.join(git_dir, "..")) + return find_path_to_project_root_from_repo_root(location, repo_root) @classmethod - def get_url_rev_and_auth(cls, url): - # type: (str) -> Tuple[str, Optional[str], AuthInfo] + def get_url_rev_and_auth(cls, url: str) -> Tuple[str, Optional[str], AuthInfo]: """ Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'. That's required because although they use SSH they sometimes don't @@ -391,60 +463,64 @@ class Git(VersionControl): # Works around an apparent Git bug # (see https://article.gmane.org/gmane.comp.version-control.git/146500) scheme, netloc, path, query, fragment = urlsplit(url) - if scheme.endswith('file'): - initial_slashes = path[:-len(path.lstrip('/'))] - newpath = ( - initial_slashes + - urllib.request.url2pathname(path) - .replace('\\', '/').lstrip('/') - ) - after_plus = scheme.find('+') + 1 + if scheme.endswith("file"): + initial_slashes = path[: -len(path.lstrip("/"))] + newpath = initial_slashes + urllib.request.url2pathname(path).replace( + "\\", "/" + ).lstrip("/") + after_plus = scheme.find("+") + 1 url = scheme[:after_plus] + urlunsplit( (scheme[after_plus:], netloc, newpath, query, fragment), ) - if '://' not in url: - assert 'file:' not in url - url = url.replace('git+', 'git+ssh://') + if "://" not in url: + assert "file:" not in url + url = url.replace("git+", "git+ssh://") url, rev, user_pass = super().get_url_rev_and_auth(url) - url = url.replace('ssh://', '') + url = url.replace("ssh://", "") else: url, rev, user_pass = super().get_url_rev_and_auth(url) return url, rev, user_pass @classmethod - def update_submodules(cls, location): - # type: (str) -> None - if not os.path.exists(os.path.join(location, '.gitmodules')): + def update_submodules(cls, location: str) -> None: + if not os.path.exists(os.path.join(location, ".gitmodules")): return cls.run_command( - ['submodule', 'update', '--init', '--recursive', '-q'], + ["submodule", "update", "--init", "--recursive", "-q"], cwd=location, ) @classmethod - def get_repository_root(cls, location): - # type: (str) -> Optional[str] + def get_repository_root(cls, location: str) -> Optional[str]: loc = super().get_repository_root(location) if loc: return loc try: r = cls.run_command( - ['rev-parse', '--show-toplevel'], + ["rev-parse", "--show-toplevel"], cwd=location, show_stdout=False, stdout_only=True, - on_returncode='raise', + on_returncode="raise", log_failed_cmd=False, ) except BadCommand: - logger.debug("could not determine if %s is under git control " - "because git is not available", location) + logger.debug( + "could not determine if %s is under git control " + "because git is not available", + location, + ) return None except InstallationError: return None - return os.path.normpath(r.rstrip('\r\n')) + return os.path.normpath(r.rstrip("\r\n")) + + @staticmethod + def should_add_vcs_url_prefix(repo_url: str) -> bool: + """In either https or ssh form, requirements must be prefixed with git+.""" + return True vcs.register(Git) diff --git a/env/Lib/site-packages/pip/_internal/vcs/mercurial.py b/env/Lib/site-packages/pip/_internal/vcs/mercurial.py index 
b4f887d327be88f98fe8bb6c26aec8b3780c6263..4595960b5bfff671449235d51a0b9312e7d6c5d1 100644 --- a/env/Lib/site-packages/pip/_internal/vcs/mercurial.py +++ b/env/Lib/site-packages/pip/_internal/vcs/mercurial.py @@ -1,7 +1,7 @@ import configparser import logging import os -from typing import List, Optional +from typing import List, Optional, Tuple from pip._internal.exceptions import BadCommand, InstallationError from pip._internal.utils.misc import HiddenText, display_path @@ -10,7 +10,7 @@ from pip._internal.utils.urls import path_to_url from pip._internal.vcs.versioncontrol import ( RevOptions, VersionControl, - find_path_to_setup_from_repo_root, + find_path_to_project_root_from_repo_root, vcs, ) @@ -18,61 +18,68 @@ logger = logging.getLogger(__name__) class Mercurial(VersionControl): - name = 'hg' - dirname = '.hg' - repo_name = 'clone' + name = "hg" + dirname = ".hg" + repo_name = "clone" schemes = ( - 'hg+file', 'hg+http', 'hg+https', 'hg+ssh', 'hg+static-http', + "hg+file", + "hg+http", + "hg+https", + "hg+ssh", + "hg+static-http", ) @staticmethod - def get_base_rev_args(rev): - # type: (str) -> List[str] - return [rev] + def get_base_rev_args(rev: str) -> List[str]: + return ["-r", rev] - def fetch_new(self, dest, url, rev_options): - # type: (str, HiddenText, RevOptions) -> None + def fetch_new( + self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int + ) -> None: rev_display = rev_options.to_display() logger.info( - 'Cloning hg %s%s to %s', + "Cloning hg %s%s to %s", url, rev_display, display_path(dest), ) - self.run_command(make_command('clone', '--noupdate', '-q', url, dest)) + if verbosity <= 0: + flags: Tuple[str, ...] = ("--quiet",) + elif verbosity == 1: + flags = () + elif verbosity == 2: + flags = ("--verbose",) + else: + flags = ("--verbose", "--debug") + self.run_command(make_command("clone", "--noupdate", *flags, url, dest)) self.run_command( - make_command('update', '-q', rev_options.to_args()), + make_command("update", *flags, rev_options.to_args()), cwd=dest, ) - def switch(self, dest, url, rev_options): - # type: (str, HiddenText, RevOptions) -> None - repo_config = os.path.join(dest, self.dirname, 'hgrc') + def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None: + repo_config = os.path.join(dest, self.dirname, "hgrc") config = configparser.RawConfigParser() try: config.read(repo_config) - config.set('paths', 'default', url.secret) - with open(repo_config, 'w') as config_file: + config.set("paths", "default", url.secret) + with open(repo_config, "w") as config_file: config.write(config_file) except (OSError, configparser.NoSectionError) as exc: - logger.warning( - 'Could not switch Mercurial repository to %s: %s', url, exc, - ) + logger.warning("Could not switch Mercurial repository to %s: %s", url, exc) else: - cmd_args = make_command('update', '-q', rev_options.to_args()) + cmd_args = make_command("update", "-q", rev_options.to_args()) self.run_command(cmd_args, cwd=dest) - def update(self, dest, url, rev_options): - # type: (str, HiddenText, RevOptions) -> None - self.run_command(['pull', '-q'], cwd=dest) - cmd_args = make_command('update', '-q', rev_options.to_args()) + def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None: + self.run_command(["pull", "-q"], cwd=dest) + cmd_args = make_command("update", "-q", rev_options.to_args()) self.run_command(cmd_args, cwd=dest) @classmethod - def get_remote_url(cls, location): - # type: (str) -> str + def get_remote_url(cls, location: str) -> str: url = 
cls.run_command( - ['showconfig', 'paths.default'], + ["showconfig", "paths.default"], show_stdout=False, stdout_only=True, cwd=location, @@ -82,13 +89,12 @@ class Mercurial(VersionControl): return url.strip() @classmethod - def get_revision(cls, location): - # type: (str) -> str + def get_revision(cls, location: str) -> str: """ Return the repository-local changeset revision number, as an integer. """ current_revision = cls.run_command( - ['parents', '--template={rev}'], + ["parents", "--template={rev}"], show_stdout=False, stdout_only=True, cwd=location, @@ -96,14 +102,13 @@ class Mercurial(VersionControl): return current_revision @classmethod - def get_requirement_revision(cls, location): - # type: (str) -> str + def get_requirement_revision(cls, location: str) -> str: """ Return the changeset identification hash, as a 40-character hexadecimal string """ current_rev_hash = cls.run_command( - ['parents', '--template={node}'], + ["parents", "--template={node}"], show_stdout=False, stdout_only=True, cwd=location, @@ -111,48 +116,48 @@ class Mercurial(VersionControl): return current_rev_hash @classmethod - def is_commit_id_equal(cls, dest, name): - # type: (str, Optional[str]) -> bool + def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool: """Always assume the versions don't match""" return False @classmethod - def get_subdirectory(cls, location): - # type: (str) -> Optional[str] + def get_subdirectory(cls, location: str) -> Optional[str]: """ - Return the path to setup.py, relative to the repo root. - Return None if setup.py is in the repo root. + Return the path to Python project root, relative to the repo root. + Return None if the project root is in the repo root. """ # find the repo root repo_root = cls.run_command( - ['root'], show_stdout=False, stdout_only=True, cwd=location + ["root"], show_stdout=False, stdout_only=True, cwd=location ).strip() if not os.path.isabs(repo_root): repo_root = os.path.abspath(os.path.join(location, repo_root)) - return find_path_to_setup_from_repo_root(location, repo_root) + return find_path_to_project_root_from_repo_root(location, repo_root) @classmethod - def get_repository_root(cls, location): - # type: (str) -> Optional[str] + def get_repository_root(cls, location: str) -> Optional[str]: loc = super().get_repository_root(location) if loc: return loc try: r = cls.run_command( - ['root'], + ["root"], cwd=location, show_stdout=False, stdout_only=True, - on_returncode='raise', + on_returncode="raise", log_failed_cmd=False, ) except BadCommand: - logger.debug("could not determine if %s is under hg control " - "because hg is not available", location) + logger.debug( + "could not determine if %s is under hg control " + "because hg is not available", + location, + ) return None except InstallationError: return None - return os.path.normpath(r.rstrip('\r\n')) + return os.path.normpath(r.rstrip("\r\n")) vcs.register(Mercurial) diff --git a/env/Lib/site-packages/pip/_internal/vcs/subversion.py b/env/Lib/site-packages/pip/_internal/vcs/subversion.py index 4d1237ca0ca44e6cd59f39e9dbf6d61a3e34c84f..16d93a67b7b6feed66f2cc432f6250ca3ad34914 100644 --- a/env/Lib/site-packages/pip/_internal/vcs/subversion.py +++ b/env/Lib/site-packages/pip/_internal/vcs/subversion.py @@ -7,6 +7,7 @@ from pip._internal.utils.misc import ( HiddenText, display_path, is_console_interactive, + is_installable_dir, split_auth_from_netloc, ) from pip._internal.utils.subprocess import CommandArgs, make_command @@ -23,30 +24,25 @@ logger = logging.getLogger(__name__) 
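# --- illustrative aside (not part of the patch) ---
# Mercurial.switch above rewrites the default path in .hg/hgrc through
# configparser. A self-contained demonstration of that round trip
# (URLs are made up):
import configparser
import io

config = configparser.RawConfigParser()
config.read_string("[paths]\ndefault = https://old.example.com/repo\n")
config.set("paths", "default", "https://new.example.com/repo")
buf = io.StringIO()
config.write(buf)
assert "default = https://new.example.com/repo" in buf.getvalue()
# --- end aside ---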
_svn_xml_url_re = re.compile('url="([^"]+)"') _svn_rev_re = re.compile(r'committed-rev="(\d+)"') _svn_info_xml_rev_re = re.compile(r'\s*revision="(\d+)"') -_svn_info_xml_url_re = re.compile(r'<url>(.*)</url>') +_svn_info_xml_url_re = re.compile(r"<url>(.*)</url>") class Subversion(VersionControl): - name = 'svn' - dirname = '.svn' - repo_name = 'checkout' - schemes = ( - 'svn+ssh', 'svn+http', 'svn+https', 'svn+svn', 'svn+file' - ) + name = "svn" + dirname = ".svn" + repo_name = "checkout" + schemes = ("svn+ssh", "svn+http", "svn+https", "svn+svn", "svn+file") @classmethod - def should_add_vcs_url_prefix(cls, remote_url): - # type: (str) -> bool + def should_add_vcs_url_prefix(cls, remote_url: str) -> bool: return True @staticmethod - def get_base_rev_args(rev): - # type: (str) -> List[str] - return ['-r', rev] + def get_base_rev_args(rev: str) -> List[str]: + return ["-r", rev] @classmethod - def get_revision(cls, location): - # type: (str) -> str + def get_revision(cls, location: str) -> str: """ Return the maximum revision for all files under a given location """ @@ -56,9 +52,9 @@ class Subversion(VersionControl): for base, dirs, _ in os.walk(location): if cls.dirname not in dirs: dirs[:] = [] - continue # no sense walking uncontrolled subdirs + continue # no sense walking uncontrolled subdirs dirs.remove(cls.dirname) - entries_fn = os.path.join(base, cls.dirname, 'entries') + entries_fn = os.path.join(base, cls.dirname, "entries") if not os.path.exists(entries_fn): # FIXME: should we warn? continue @@ -67,21 +63,22 @@ class Subversion(VersionControl): if base == location: assert dirurl is not None - base = dirurl + '/' # save the root url + base = dirurl + "/" # save the root url elif not dirurl or not dirurl.startswith(base): dirs[:] = [] - continue # not part of the same svn tree, skip it + continue # not part of the same svn tree, skip it revision = max(revision, localrev) return str(revision) @classmethod - def get_netloc_and_auth(cls, netloc, scheme): - # type: (str, str) -> Tuple[str, Tuple[Optional[str], Optional[str]]] + def get_netloc_and_auth( + cls, netloc: str, scheme: str + ) -> Tuple[str, Tuple[Optional[str], Optional[str]]]: """ This override allows the auth information to be passed to svn via the --username and --password options instead of via the URL. """ - if scheme == 'ssh': + if scheme == "ssh": # The --username and --password options can't be used for # svn+ssh URLs, so keep the auth information in the URL. 
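# --- illustrative aside (not part of the patch) ---
# For non-ssh schemes, Subversion.get_netloc_and_auth above strips
# credentials out of the netloc so they can be passed via
# --username/--password instead. split_auth_from_netloc is pip-internal;
# a simplified equivalent for illustration (the real helper also unquotes):
from typing import Optional, Tuple

def split_auth_from_netloc(netloc: str) -> Tuple[str, Tuple[Optional[str], Optional[str]]]:
    if "@" not in netloc:
        return netloc, (None, None)
    auth, host = netloc.rsplit("@", 1)
    user, sep, password = auth.partition(":")
    return host, (user, password if sep else None)

assert split_auth_from_netloc("user:secret@svn.example.com") == (
    "svn.example.com",
    ("user", "secret"),
)
# --- end aside ---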
return super().get_netloc_and_auth(netloc, scheme) @@ -89,40 +86,38 @@ class Subversion(VersionControl): return split_auth_from_netloc(netloc) @classmethod - def get_url_rev_and_auth(cls, url): - # type: (str) -> Tuple[str, Optional[str], AuthInfo] - # hotfix the URL scheme after removing svn+ from svn+ssh:// readd it + def get_url_rev_and_auth(cls, url: str) -> Tuple[str, Optional[str], AuthInfo]: + # hotfix the URL scheme after removing svn+ from svn+ssh:// re-add it url, rev, user_pass = super().get_url_rev_and_auth(url) - if url.startswith('ssh://'): - url = 'svn+' + url + if url.startswith("ssh://"): + url = "svn+" + url return url, rev, user_pass @staticmethod - def make_rev_args(username, password): - # type: (Optional[str], Optional[HiddenText]) -> CommandArgs - extra_args = [] # type: CommandArgs + def make_rev_args( + username: Optional[str], password: Optional[HiddenText] + ) -> CommandArgs: + extra_args: CommandArgs = [] if username: - extra_args += ['--username', username] + extra_args += ["--username", username] if password: - extra_args += ['--password', password] + extra_args += ["--password", password] return extra_args @classmethod - def get_remote_url(cls, location): - # type: (str) -> str - # In cases where the source is in a subdirectory, not alongside - # setup.py we have to look up in the location until we find a real - # setup.py + def get_remote_url(cls, location: str) -> str: + # In cases where the source is in a subdirectory, we have to look up in + # the location until we find a valid project root. orig_location = location - while not os.path.exists(os.path.join(location, 'setup.py')): + while not is_installable_dir(location): last_location = location location = os.path.dirname(location) if location == last_location: # We've traversed up to the root of the filesystem without - # finding setup.py + # finding a Python project. 
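# --- illustrative aside (not part of the patch) ---
# The loop above replaces the old "look for setup.py" walk with
# is_installable_dir. That helper is pip-internal; it is stubbed here as
# "directory contains a recognised project file" to make the sketch runnable:
import os
from typing import Optional

def _is_installable_dir(path: str) -> bool:
    return any(
        os.path.isfile(os.path.join(path, name))
        for name in ("pyproject.toml", "setup.py", "setup.cfg")
    )

def find_project_root(location: str) -> Optional[str]:
    while not _is_installable_dir(location):
        parent = os.path.dirname(location)
        if parent == location:  # reached the filesystem root
            return None
        location = parent
    return location
# --- end aside ---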
logger.warning( - "Could not find setup.py for directory %s (tried all " + "Could not find Python project for directory %s (tried all " "parent directories)", orig_location, ) @@ -135,30 +130,27 @@ class Subversion(VersionControl): return url @classmethod - def _get_svn_url_rev(cls, location): - # type: (str) -> Tuple[Optional[str], int] + def _get_svn_url_rev(cls, location: str) -> Tuple[Optional[str], int]: from pip._internal.exceptions import InstallationError - entries_path = os.path.join(location, cls.dirname, 'entries') + entries_path = os.path.join(location, cls.dirname, "entries") if os.path.exists(entries_path): with open(entries_path) as f: data = f.read() else: # subversion >= 1.7 does not have the 'entries' file - data = '' + data = "" url = None - if (data.startswith('8') or - data.startswith('9') or - data.startswith('10')): - entries = list(map(str.splitlines, data.split('\n\x0c\n'))) + if data.startswith("8") or data.startswith("9") or data.startswith("10"): + entries = list(map(str.splitlines, data.split("\n\x0c\n"))) del entries[0][0] # get rid of the '8' url = entries[0][3] revs = [int(d[9]) for d in entries if len(d) > 9 and d[9]] + [0] - elif data.startswith('<?xml'): + elif data.startswith("<?xml"): match = _svn_xml_url_re.search(data) if not match: - raise ValueError(f'Badly formatted data: {data!r}') - url = match.group(1) # get repository URL + raise ValueError(f"Badly formatted data: {data!r}") + url = match.group(1) # get repository URL revs = [int(m.group(1)) for m in _svn_rev_re.finditer(data)] + [0] else: try: @@ -169,16 +161,14 @@ class Subversion(VersionControl): # is being used to prompt for passwords, because passwords # are only potentially needed for remote server requests. xml = cls.run_command( - ['info', '--xml', location], + ["info", "--xml", location], show_stdout=False, stdout_only=True, ) match = _svn_info_xml_url_re.search(xml) assert match is not None url = match.group(1) - revs = [ - int(m.group(1)) for m in _svn_info_xml_rev_re.finditer(xml) - ] + revs = [int(m.group(1)) for m in _svn_info_xml_rev_re.finditer(xml)] except InstallationError: url, revs = None, [] @@ -190,13 +180,11 @@ class Subversion(VersionControl): return url, rev @classmethod - def is_commit_id_equal(cls, dest, name): - # type: (str, Optional[str]) -> bool + def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool: """Always assume the versions don't match""" return False - def __init__(self, use_interactive=None): - # type: (bool) -> None + def __init__(self, use_interactive: Optional[bool] = None) -> None: if use_interactive is None: use_interactive = is_console_interactive() self.use_interactive = use_interactive @@ -206,12 +194,11 @@ class Subversion(VersionControl): # Special value definitions: # None: Not evaluated yet. # Empty tuple: Could not parse version. - self._vcs_version = None # type: Optional[Tuple[int, ...]] + self._vcs_version: Optional[Tuple[int, ...]] = None super().__init__() - def call_vcs_version(self): - # type: () -> Tuple[int, ...] + def call_vcs_version(self) -> Tuple[int, ...]: """Query the version of the currently installed Subversion client. 
:return: A tuple containing the parts of the version information or @@ -225,15 +212,13 @@ class Subversion(VersionControl): # compiled Mar 28 2018, 08:49:13 on x86_64-pc-linux-gnu # svn, version 1.12.0-SlikSvn (SlikSvn/1.12.0) # compiled May 28 2019, 13:44:56 on x86_64-microsoft-windows6.2 - version_prefix = 'svn, version ' - version = self.run_command( - ['--version'], show_stdout=False, stdout_only=True - ) + version_prefix = "svn, version " + version = self.run_command(["--version"], show_stdout=False, stdout_only=True) if not version.startswith(version_prefix): return () - version = version[len(version_prefix):].split()[0] - version_list = version.partition('-')[0].split('.') + version = version[len(version_prefix) :].split()[0] + version_list = version.partition("-")[0].split(".") try: parsed_version = tuple(map(int, version_list)) except ValueError: @@ -241,8 +226,7 @@ class Subversion(VersionControl): return parsed_version - def get_vcs_version(self): - # type: () -> Tuple[int, ...] + def get_vcs_version(self) -> Tuple[int, ...]: """Return the version of the currently installed Subversion client. If the version of the Subversion client has already been queried, @@ -262,8 +246,7 @@ class Subversion(VersionControl): self._vcs_version = vcs_version return vcs_version - def get_remote_call_options(self): - # type: () -> CommandArgs + def get_remote_call_options(self) -> CommandArgs: """Return options to be used on calls to Subversion that contact the server. These options are applicable for the following ``svn`` subcommands used @@ -278,7 +261,7 @@ class Subversion(VersionControl): if not self.use_interactive: # --non-interactive switch is available since Subversion 0.14.4. # Subversion < 1.8 runs in interactive mode by default. - return ['--non-interactive'] + return ["--non-interactive"] svn_version = self.get_vcs_version() # By default, Subversion >= 1.8 runs in non-interactive mode if @@ -290,37 +273,49 @@ class Subversion(VersionControl): # SVN 1.7, pip should continue to support SVN 1.7. Therefore, pip # can't safely add the option if the SVN version is < 1.8 (or unknown). 
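# --- illustrative aside (not part of the patch) ---
# call_vcs_version above parses `svn --version` output into an int tuple,
# which get_remote_call_options then compares against (1, 8). A standalone
# sketch of the parsing:
from typing import Tuple

def parse_svn_version(output: str) -> Tuple[int, ...]:
    prefix = "svn, version "
    if not output.startswith(prefix):
        return ()  # empty tuple means "could not parse"
    version = output[len(prefix):].split()[0]     # e.g. "1.12.0-SlikSvn"
    parts = version.partition("-")[0].split(".")  # drop any vendor suffix
    try:
        return tuple(map(int, parts))
    except ValueError:
        return ()

assert parse_svn_version("svn, version 1.10.3 (r1842928)") == (1, 10, 3)
assert parse_svn_version("svn, version 1.12.0-SlikSvn (SlikSvn/1.12.0)") >= (1, 8)
# --- end aside ---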
if svn_version >= (1, 8): - return ['--force-interactive'] + return ["--force-interactive"] return [] - def fetch_new(self, dest, url, rev_options): - # type: (str, HiddenText, RevOptions) -> None + def fetch_new( + self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int + ) -> None: rev_display = rev_options.to_display() logger.info( - 'Checking out %s%s to %s', + "Checking out %s%s to %s", url, rev_display, display_path(dest), ) + if verbosity <= 0: + flag = "--quiet" + else: + flag = "" cmd_args = make_command( - 'checkout', '-q', self.get_remote_call_options(), - rev_options.to_args(), url, dest, + "checkout", + flag, + self.get_remote_call_options(), + rev_options.to_args(), + url, + dest, ) self.run_command(cmd_args) - def switch(self, dest, url, rev_options): - # type: (str, HiddenText, RevOptions) -> None + def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None: cmd_args = make_command( - 'switch', self.get_remote_call_options(), rev_options.to_args(), - url, dest, + "switch", + self.get_remote_call_options(), + rev_options.to_args(), + url, + dest, ) self.run_command(cmd_args) - def update(self, dest, url, rev_options): - # type: (str, HiddenText, RevOptions) -> None + def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None: cmd_args = make_command( - 'update', self.get_remote_call_options(), rev_options.to_args(), + "update", + self.get_remote_call_options(), + rev_options.to_args(), dest, ) self.run_command(cmd_args) diff --git a/env/Lib/site-packages/pip/_internal/vcs/versioncontrol.py b/env/Lib/site-packages/pip/_internal/vcs/versioncontrol.py index 97977b5799cead4d19f217f9b7ec5e5055a25b70..02bbf68e7ad3ce14f191af24260312e817e12df7 100644 --- a/env/Lib/site-packages/pip/_internal/vcs/versioncontrol.py +++ b/env/Lib/site-packages/pip/_internal/vcs/versioncontrol.py @@ -6,6 +6,7 @@ import shutil import sys import urllib.parse from typing import ( + TYPE_CHECKING, Any, Dict, Iterable, @@ -27,12 +28,25 @@ from pip._internal.utils.misc import ( display_path, hide_url, hide_value, + is_installable_dir, rmtree, ) -from pip._internal.utils.subprocess import CommandArgs, call_subprocess, make_command +from pip._internal.utils.subprocess import ( + CommandArgs, + call_subprocess, + format_command_args, + make_command, +) from pip._internal.utils.urls import get_url_scheme -__all__ = ['vcs'] +if TYPE_CHECKING: + # Literal was introduced in Python 3.8. + # + # TODO: Remove `if TYPE_CHECKING` when dropping support for Python 3.7. + from typing import Literal + + +__all__ = ["vcs"] logger = logging.getLogger(__name__) @@ -40,19 +54,19 @@ logger = logging.getLogger(__name__) AuthInfo = Tuple[Optional[str], Optional[str]] -def is_url(name): - # type: (str) -> bool +def is_url(name: str) -> bool: """ Return true if the name looks like a URL. """ scheme = get_url_scheme(name) if scheme is None: return False - return scheme in ['http', 'https', 'file', 'ftp'] + vcs.all_schemes + return scheme in ["http", "https", "file", "ftp"] + vcs.all_schemes -def make_vcs_requirement_url(repo_url, rev, project_name, subdir=None): - # type: (str, str, str, Optional[str]) -> str +def make_vcs_requirement_url( + repo_url: str, rev: str, project_name: str, subdir: Optional[str] = None +) -> str: """ Return the URL for a VCS requirement. @@ -61,30 +75,31 @@ def make_vcs_requirement_url(repo_url, rev, project_name, subdir=None): project_name: the (unescaped) project name. 
""" egg_project_name = project_name.replace("-", "_") - req = f'{repo_url}@{rev}#egg={egg_project_name}' + req = f"{repo_url}@{rev}#egg={egg_project_name}" if subdir: - req += f'&subdirectory={subdir}' + req += f"&subdirectory={subdir}" return req -def find_path_to_setup_from_repo_root(location, repo_root): - # type: (str, str) -> Optional[str] +def find_path_to_project_root_from_repo_root( + location: str, repo_root: str +) -> Optional[str]: """ - Find the path to `setup.py` by searching up the filesystem from `location`. - Return the path to `setup.py` relative to `repo_root`. - Return None if `setup.py` is in `repo_root` or cannot be found. + Find the the Python project's root by searching up the filesystem from + `location`. Return the path to project root relative to `repo_root`. + Return None if the project root is `repo_root`, or cannot be found. """ - # find setup.py + # find project root. orig_location = location - while not os.path.exists(os.path.join(location, 'setup.py')): + while not is_installable_dir(location): last_location = location location = os.path.dirname(location) if location == last_location: # We've traversed up to the root of the filesystem without - # finding setup.py + # finding a Python project. logger.warning( - "Could not find setup.py for directory %s (tried all " + "Could not find a Python project for directory %s (tried all " "parent directories)", orig_location, ) @@ -100,6 +115,12 @@ class RemoteNotFoundError(Exception): pass +class RemoteNotValidError(Exception): + def __init__(self, url: str): + super().__init__(url) + self.url = url + + class RevOptions: """ @@ -111,11 +132,10 @@ class RevOptions: def __init__( self, - vc_class, # type: Type[VersionControl] - rev=None, # type: Optional[str] - extra_args=None, # type: Optional[CommandArgs] - ): - # type: (...) -> None + vc_class: Type["VersionControl"], + rev: Optional[str] = None, + extra_args: Optional[CommandArgs] = None, + ) -> None: """ Args: vc_class: a VersionControl subclass. @@ -128,26 +148,23 @@ class RevOptions: self.extra_args = extra_args self.rev = rev self.vc_class = vc_class - self.branch_name = None # type: Optional[str] + self.branch_name: Optional[str] = None - def __repr__(self): - # type: () -> str - return f'<RevOptions {self.vc_class.name}: rev={self.rev!r}>' + def __repr__(self) -> str: + return f"<RevOptions {self.vc_class.name}: rev={self.rev!r}>" @property - def arg_rev(self): - # type: () -> Optional[str] + def arg_rev(self) -> Optional[str]: if self.rev is None: return self.vc_class.default_arg_rev return self.rev - def to_args(self): - # type: () -> CommandArgs + def to_args(self) -> CommandArgs: """ Return the VCS-specific command arguments. """ - args = [] # type: CommandArgs + args: CommandArgs = [] rev = self.arg_rev if rev is not None: args += self.vc_class.get_base_rev_args(rev) @@ -155,15 +172,13 @@ class RevOptions: return args - def to_display(self): - # type: () -> str + def to_display(self) -> str: if not self.rev: - return '' + return "" - return f' (to revision {self.rev})' + return f" (to revision {self.rev})" - def make_new(self, rev): - # type: (str) -> RevOptions + def make_new(self, rev: str) -> "RevOptions": """ Make a copy of the current instance, but with a new rev. 
@@ -174,54 +189,46 @@ class RevOptions: class VcsSupport: - _registry = {} # type: Dict[str, VersionControl] - schemes = ['ssh', 'git', 'hg', 'bzr', 'sftp', 'svn'] + _registry: Dict[str, "VersionControl"] = {} + schemes = ["ssh", "git", "hg", "bzr", "sftp", "svn"] - def __init__(self): - # type: () -> None + def __init__(self) -> None: # Register more schemes with urlparse for various version control # systems urllib.parse.uses_netloc.extend(self.schemes) super().__init__() - def __iter__(self): - # type: () -> Iterator[str] + def __iter__(self) -> Iterator[str]: return self._registry.__iter__() @property - def backends(self): - # type: () -> List[VersionControl] + def backends(self) -> List["VersionControl"]: return list(self._registry.values()) @property - def dirnames(self): - # type: () -> List[str] + def dirnames(self) -> List[str]: return [backend.dirname for backend in self.backends] @property - def all_schemes(self): - # type: () -> List[str] - schemes = [] # type: List[str] + def all_schemes(self) -> List[str]: + schemes: List[str] = [] for backend in self.backends: schemes.extend(backend.schemes) return schemes - def register(self, cls): - # type: (Type[VersionControl]) -> None - if not hasattr(cls, 'name'): - logger.warning('Cannot register VCS %s', cls.__name__) + def register(self, cls: Type["VersionControl"]) -> None: + if not hasattr(cls, "name"): + logger.warning("Cannot register VCS %s", cls.__name__) return if cls.name not in self._registry: self._registry[cls.name] = cls() - logger.debug('Registered VCS backend: %s', cls.name) + logger.debug("Registered VCS backend: %s", cls.name) - def unregister(self, name): - # type: (str) -> None + def unregister(self, name: str) -> None: if name in self._registry: del self._registry[name] - def get_backend_for_dir(self, location): - # type: (str) -> Optional[VersionControl] + def get_backend_for_dir(self, location: str) -> Optional["VersionControl"]: """ Return a VersionControl object if a repository of that type is found at the given directory. @@ -231,8 +238,7 @@ class VcsSupport: repo_path = vcs_backend.get_repository_root(location) if not repo_path: continue - logger.debug('Determine that %s uses VCS: %s', - location, vcs_backend.name) + logger.debug("Determine that %s uses VCS: %s", location, vcs_backend.name) vcs_backends[repo_path] = vcs_backend if not vcs_backends: @@ -245,8 +251,7 @@ class VcsSupport: inner_most_repo_path = max(vcs_backends, key=len) return vcs_backends[inner_most_repo_path] - def get_backend_for_scheme(self, scheme): - # type: (str) -> Optional[VersionControl] + def get_backend_for_scheme(self, scheme: str) -> Optional["VersionControl"]: """ Return a VersionControl object or None. """ @@ -255,8 +260,7 @@ class VcsSupport: return vcs_backend return None - def get_backend(self, name): - # type: (str) -> Optional[VersionControl] + def get_backend(self, name: str) -> Optional["VersionControl"]: """ Return a VersionControl object or None. """ @@ -268,44 +272,40 @@ vcs = VcsSupport() class VersionControl: - name = '' - dirname = '' - repo_name = '' + name = "" + dirname = "" + repo_name = "" # List of supported schemes for this Version Control - schemes = () # type: Tuple[str, ...] + schemes: Tuple[str, ...] = () # Iterable of environment variable names to pass to call_subprocess(). - unset_environ = () # type: Tuple[str, ...] - default_arg_rev = None # type: Optional[str] + unset_environ: Tuple[str, ...] 
= () + default_arg_rev: Optional[str] = None @classmethod - def should_add_vcs_url_prefix(cls, remote_url): - # type: (str) -> bool + def should_add_vcs_url_prefix(cls, remote_url: str) -> bool: """ Return whether the vcs prefix (e.g. "git+") should be added to a repository's remote url when used in a requirement. """ - return not remote_url.lower().startswith(f'{cls.name}:') + return not remote_url.lower().startswith(f"{cls.name}:") @classmethod - def get_subdirectory(cls, location): - # type: (str) -> Optional[str] + def get_subdirectory(cls, location: str) -> Optional[str]: """ - Return the path to setup.py, relative to the repo root. - Return None if setup.py is in the repo root. + Return the path to Python project root, relative to the repo root. + Return None if the project root is in the repo root. """ return None @classmethod - def get_requirement_revision(cls, repo_dir): - # type: (str) -> str + def get_requirement_revision(cls, repo_dir: str) -> str: """ Return the revision string that should be used in a requirement. """ return cls.get_revision(repo_dir) @classmethod - def get_src_requirement(cls, repo_dir, project_name): - # type: (str, str) -> str + def get_src_requirement(cls, repo_dir: str, project_name: str) -> str: """ Return the requirement string to use to redownload the files currently at the given repository directory. @@ -320,18 +320,16 @@ class VersionControl: repo_url = cls.get_remote_url(repo_dir) if cls.should_add_vcs_url_prefix(repo_url): - repo_url = f'{cls.name}+{repo_url}' + repo_url = f"{cls.name}+{repo_url}" revision = cls.get_requirement_revision(repo_dir) subdir = cls.get_subdirectory(repo_dir) - req = make_vcs_requirement_url(repo_url, revision, project_name, - subdir=subdir) + req = make_vcs_requirement_url(repo_url, revision, project_name, subdir=subdir) return req @staticmethod - def get_base_rev_args(rev): - # type: (str) -> List[str] + def get_base_rev_args(rev: str) -> List[str]: """ Return the base revision arguments for a vcs command. @@ -340,8 +338,7 @@ class VersionControl: """ raise NotImplementedError - def is_immutable_rev_checkout(self, url, dest): - # type: (str, str) -> bool + def is_immutable_rev_checkout(self, url: str, dest: str) -> bool: """ Return true if the commit hash checked out at dest matches the revision in url. @@ -355,8 +352,9 @@ class VersionControl: return False @classmethod - def make_rev_options(cls, rev=None, extra_args=None): - # type: (Optional[str], Optional[CommandArgs]) -> RevOptions + def make_rev_options( + cls, rev: Optional[str] = None, extra_args: Optional[CommandArgs] = None + ) -> RevOptions: """ Return a RevOptions object. @@ -367,18 +365,18 @@ class VersionControl: return RevOptions(cls, rev, extra_args=extra_args) @classmethod - def _is_local_repository(cls, repo): - # type: (str) -> bool + def _is_local_repository(cls, repo: str) -> bool: """ - posix absolute paths start with os.path.sep, - win32 ones start with drive (like c:\\folder) + posix absolute paths start with os.path.sep, + win32 ones start with drive (like c:\\folder) """ drive, tail = os.path.splitdrive(repo) return repo.startswith(os.path.sep) or bool(drive) @classmethod - def get_netloc_and_auth(cls, netloc, scheme): - # type: (str, str) -> Tuple[str, Tuple[Optional[str], Optional[str]]] + def get_netloc_and_auth( + cls, netloc: str, scheme: str + ) -> Tuple[str, Tuple[Optional[str], Optional[str]]]: """ Parse the repository URL's netloc, and return the new netloc to use along with auth information. 
@@ -397,8 +395,7 @@ class VersionControl: return netloc, (None, None) @classmethod - def get_url_rev_and_auth(cls, url): - # type: (str) -> Tuple[str, Optional[str], AuthInfo] + def get_url_rev_and_auth(cls, url: str) -> Tuple[str, Optional[str], AuthInfo]: """ Parse the repository URL to use, and return the URL, revision, and auth info to use. @@ -406,44 +403,44 @@ class VersionControl: Returns: (url, rev, (username, password)). """ scheme, netloc, path, query, frag = urllib.parse.urlsplit(url) - if '+' not in scheme: + if "+" not in scheme: raise ValueError( "Sorry, {!r} is a malformed VCS url. " "The format is <vcs>+<protocol>://<url>, " "e.g. svn+http://myrepo/svn/MyApp#egg=MyApp".format(url) ) # Remove the vcs prefix. - scheme = scheme.split('+', 1)[1] + scheme = scheme.split("+", 1)[1] netloc, user_pass = cls.get_netloc_and_auth(netloc, scheme) rev = None - if '@' in path: - path, rev = path.rsplit('@', 1) + if "@" in path: + path, rev = path.rsplit("@", 1) if not rev: raise InstallationError( "The URL {!r} has an empty revision (after @) " "which is not supported. Include a revision after @ " "or remove @ from the URL.".format(url) ) - url = urllib.parse.urlunsplit((scheme, netloc, path, query, '')) + url = urllib.parse.urlunsplit((scheme, netloc, path, query, "")) return url, rev, user_pass @staticmethod - def make_rev_args(username, password): - # type: (Optional[str], Optional[HiddenText]) -> CommandArgs + def make_rev_args( + username: Optional[str], password: Optional[HiddenText] + ) -> CommandArgs: """ Return the RevOptions "extra arguments" to use in obtain(). """ return [] - def get_url_rev_options(self, url): - # type: (HiddenText) -> Tuple[HiddenText, RevOptions] + def get_url_rev_options(self, url: HiddenText) -> Tuple[HiddenText, RevOptions]: """ Return the URL and RevOptions object to use in obtain(), as a tuple (url, rev_options). """ secret_url, rev, user_pass = self.get_url_rev_and_auth(url.secret) username, secret_password = user_pass - password = None # type: Optional[HiddenText] + password: Optional[HiddenText] = None if secret_password is not None: password = hide_value(secret_password) extra_args = self.make_rev_args(username, password) @@ -452,24 +449,23 @@ class VersionControl: return hide_url(secret_url), rev_options @staticmethod - def normalize_url(url): - # type: (str) -> str + def normalize_url(url: str) -> str: """ Normalize a URL for comparison by unquoting it and removing any trailing slash. """ - return urllib.parse.unquote(url).rstrip('/') + return urllib.parse.unquote(url).rstrip("/") @classmethod - def compare_urls(cls, url1, url2): - # type: (str, str) -> bool + def compare_urls(cls, url1: str, url2: str) -> bool: """ Compare two repo URLs for identity, ignoring incidental differences. """ - return (cls.normalize_url(url1) == cls.normalize_url(url2)) + return cls.normalize_url(url1) == cls.normalize_url(url2) - def fetch_new(self, dest, url, rev_options): - # type: (str, HiddenText, RevOptions) -> None + def fetch_new( + self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int + ) -> None: """ Fetch a revision from a repository, in the case that this is the first fetch from the repository. @@ -477,11 +473,11 @@ class VersionControl: Args: dest: the directory to fetch the repository to. rev_options: a RevOptions object. + verbosity: verbosity level. 
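# --- illustrative aside (not part of the patch) ---
# The parsing contract of get_url_rev_and_auth above, reduced to the
# scheme-stripping and @-revision split (auth handling omitted):
import urllib.parse
from typing import Optional, Tuple

def split_vcs_url(url: str) -> Tuple[str, Optional[str]]:
    scheme, netloc, path, query, _frag = urllib.parse.urlsplit(url)
    assert "+" in scheme, "expected <vcs>+<protocol>://<url> form"
    scheme = scheme.split("+", 1)[1]  # remove the vcs prefix
    rev = None
    if "@" in path:
        path, rev = path.rsplit("@", 1)
    return urllib.parse.urlunsplit((scheme, netloc, path, query, "")), rev

assert split_vcs_url("git+https://host/repo.git@v1.0") == ("https://host/repo.git", "v1.0")
# --- end aside ---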
""" raise NotImplementedError - def switch(self, dest, url, rev_options): - # type: (str, HiddenText, RevOptions) -> None + def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None: """ Switch the repo at ``dest`` to point to ``URL``. @@ -490,8 +486,7 @@ class VersionControl: """ raise NotImplementedError - def update(self, dest, url, rev_options): - # type: (str, HiddenText, RevOptions) -> None + def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None: """ Update an already-existing repo to the given ``rev_options``. @@ -501,8 +496,7 @@ class VersionControl: raise NotImplementedError @classmethod - def is_commit_id_equal(cls, dest, name): - # type: (str, Optional[str]) -> bool + def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool: """ Return whether the id of the current commit equals the given name. @@ -512,19 +506,19 @@ class VersionControl: """ raise NotImplementedError - def obtain(self, dest, url): - # type: (str, HiddenText) -> None + def obtain(self, dest: str, url: HiddenText, verbosity: int) -> None: """ Install or update in editable mode the package represented by this VersionControl object. :param dest: the repository directory in which to install or update. :param url: the repository URL starting with a vcs prefix. + :param verbosity: verbosity level. """ url, rev_options = self.get_url_rev_options(url) if not os.path.exists(dest): - self.fetch_new(dest, url, rev_options) + self.fetch_new(dest, url, rev_options, verbosity=verbosity) return rev_display = rev_options.to_display() @@ -532,73 +526,68 @@ class VersionControl: existing_url = self.get_remote_url(dest) if self.compare_urls(existing_url, url.secret): logger.debug( - '%s in %s exists, and has correct URL (%s)', + "%s in %s exists, and has correct URL (%s)", self.repo_name.title(), display_path(dest), url, ) if not self.is_commit_id_equal(dest, rev_options.rev): logger.info( - 'Updating %s %s%s', + "Updating %s %s%s", display_path(dest), self.repo_name, rev_display, ) self.update(dest, url, rev_options) else: - logger.info('Skipping because already up-to-date.') + logger.info("Skipping because already up-to-date.") return logger.warning( - '%s %s in %s exists with URL %s', + "%s %s in %s exists with URL %s", self.name, self.repo_name, display_path(dest), existing_url, ) - prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ', - ('s', 'i', 'w', 'b')) + prompt = ("(s)witch, (i)gnore, (w)ipe, (b)ackup ", ("s", "i", "w", "b")) else: logger.warning( - 'Directory %s already exists, and is not a %s %s.', + "Directory %s already exists, and is not a %s %s.", dest, self.name, self.repo_name, ) # https://github.com/python/mypy/issues/1174 - prompt = ('(i)gnore, (w)ipe, (b)ackup ', # type: ignore - ('i', 'w', 'b')) + prompt = ("(i)gnore, (w)ipe, (b)ackup ", ("i", "w", "b")) # type: ignore logger.warning( - 'The plan is to install the %s repository %s', + "The plan is to install the %s repository %s", self.name, url, ) - response = ask_path_exists('What to do? {}'.format( - prompt[0]), prompt[1]) + response = ask_path_exists("What to do? 
{}".format(prompt[0]), prompt[1]) - if response == 'a': + if response == "a": sys.exit(-1) - if response == 'w': - logger.warning('Deleting %s', display_path(dest)) + if response == "w": + logger.warning("Deleting %s", display_path(dest)) rmtree(dest) - self.fetch_new(dest, url, rev_options) + self.fetch_new(dest, url, rev_options, verbosity=verbosity) return - if response == 'b': + if response == "b": dest_dir = backup_dir(dest) - logger.warning( - 'Backing up %s to %s', display_path(dest), dest_dir, - ) + logger.warning("Backing up %s to %s", display_path(dest), dest_dir) shutil.move(dest, dest_dir) - self.fetch_new(dest, url, rev_options) + self.fetch_new(dest, url, rev_options, verbosity=verbosity) return # Do nothing if the response is "i". - if response == 's': + if response == "s": logger.info( - 'Switching %s %s to %s%s', + "Switching %s %s to %s%s", self.repo_name, display_path(dest), url, @@ -606,21 +595,20 @@ class VersionControl: ) self.switch(dest, url, rev_options) - def unpack(self, location, url): - # type: (str, HiddenText) -> None + def unpack(self, location: str, url: HiddenText, verbosity: int) -> None: """ Clean up current location and download the url repository (and vcs infos) into location :param url: the repository URL starting with a vcs prefix. + :param verbosity: verbosity level. """ if os.path.exists(location): rmtree(location) - self.obtain(location, url=url) + self.obtain(location, url=url, verbosity=verbosity) @classmethod - def get_remote_url(cls, location): - # type: (str) -> str + def get_remote_url(cls, location: str) -> str: """ Return the url used at location @@ -630,8 +618,7 @@ class VersionControl: raise NotImplementedError @classmethod - def get_revision(cls, location): - # type: (str) -> str + def get_revision(cls, location: str) -> str: """ Return the current commit id of the files at the given location. """ @@ -640,40 +627,46 @@ class VersionControl: @classmethod def run_command( cls, - cmd, # type: Union[List[str], CommandArgs] - show_stdout=True, # type: bool - cwd=None, # type: Optional[str] - on_returncode='raise', # type: str - extra_ok_returncodes=None, # type: Optional[Iterable[int]] - command_desc=None, # type: Optional[str] - extra_environ=None, # type: Optional[Mapping[str, Any]] - spinner=None, # type: Optional[SpinnerInterface] - log_failed_cmd=True, # type: bool - stdout_only=False, # type: bool - ): - # type: (...) 
-> str + cmd: Union[List[str], CommandArgs], + show_stdout: bool = True, + cwd: Optional[str] = None, + on_returncode: 'Literal["raise", "warn", "ignore"]' = "raise", + extra_ok_returncodes: Optional[Iterable[int]] = None, + command_desc: Optional[str] = None, + extra_environ: Optional[Mapping[str, Any]] = None, + spinner: Optional[SpinnerInterface] = None, + log_failed_cmd: bool = True, + stdout_only: bool = False, + ) -> str: """ Run a VCS subcommand This is simply a wrapper around call_subprocess that adds the VCS command name, and checks that the VCS is available """ cmd = make_command(cls.name, *cmd) + if command_desc is None: + command_desc = format_command_args(cmd) try: - return call_subprocess(cmd, show_stdout, cwd, - on_returncode=on_returncode, - extra_ok_returncodes=extra_ok_returncodes, - command_desc=command_desc, - extra_environ=extra_environ, - unset_environ=cls.unset_environ, - spinner=spinner, - log_failed_cmd=log_failed_cmd, - stdout_only=stdout_only) + return call_subprocess( + cmd, + show_stdout, + cwd, + on_returncode=on_returncode, + extra_ok_returncodes=extra_ok_returncodes, + command_desc=command_desc, + extra_environ=extra_environ, + unset_environ=cls.unset_environ, + spinner=spinner, + log_failed_cmd=log_failed_cmd, + stdout_only=stdout_only, + ) except FileNotFoundError: # errno.ENOENT = no such file or directory # In other words, the VCS executable isn't available raise BadCommand( - f'Cannot find command {cls.name!r} - do you have ' - f'{cls.name!r} installed and in your PATH?') + f"Cannot find command {cls.name!r} - do you have " + f"{cls.name!r} installed and in your PATH?" + ) except PermissionError: # errno.EACCES = Permission denied # This error occurs, for instance, when the command is installed @@ -688,18 +681,15 @@ class VersionControl: ) @classmethod - def is_repository_directory(cls, path): - # type: (str) -> bool + def is_repository_directory(cls, path: str) -> bool: """ Return whether a directory path is a repository directory. """ - logger.debug('Checking in %s for %s (%s)...', - path, cls.dirname, cls.name) + logger.debug("Checking in %s for %s (%s)...", path, cls.dirname, cls.name) return os.path.exists(os.path.join(path, cls.dirname)) @classmethod - def get_repository_root(cls, location): - # type: (str) -> Optional[str] + def get_repository_root(cls, location: str) -> Optional[str]: """ Return the "root" (top-level) directory controlled by the vcs, or `None` if the directory is not in any. 
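One behavioral thread runs through the versioncontrol.py hunks above: `unpack()` now takes a `verbosity` argument and passes it through `obtain()` into `fetch_new()`, so each backend can scale its subprocess output. A toy sketch of the kind of flag mapping a backend might apply (the flag names here are illustrative, not pip's actual choices — pip's git backend, for example, maps verbosity 0 to `--quiet`):

    # Sketch: maps the integer verbosity threaded through unpack/obtain/fetch_new
    # to command-line flags a backend could pass to its VCS executable.
    from typing import List

    def verbosity_flags(verbosity: int) -> List[str]:
        if verbosity <= 0:
            return ["--quiet"]    # suppress clone/checkout chatter
        if verbosity >= 2:
            return ["--verbose"]  # extra -v levels could append more flags
        return []

    assert verbosity_flags(0) == ["--quiet"]
    assert verbosity_flags(1) == []
    assert verbosity_flags(3) == ["--verbose"]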
diff --git a/env/Lib/site-packages/pip/_internal/wheel_builder.py b/env/Lib/site-packages/pip/_internal/wheel_builder.py index 92f172bca1660978b0ab6d9abb737171856496a2..60d75dd18effb6e35b216cdfa3e30b8cc5bd620b 100644 --- a/env/Lib/site-packages/pip/_internal/wheel_builder.py +++ b/env/Lib/site-packages/pip/_internal/wheel_builder.py @@ -5,21 +5,22 @@ import logging import os.path import re import shutil -from typing import Any, Callable, Iterable, List, Optional, Tuple +from typing import Iterable, List, Optional, Tuple from pip._vendor.packaging.utils import canonicalize_name, canonicalize_version from pip._vendor.packaging.version import InvalidVersion, Version from pip._internal.cache import WheelCache from pip._internal.exceptions import InvalidWheelFilename, UnsupportedWheel -from pip._internal.metadata import get_wheel_distribution +from pip._internal.metadata import FilesystemWheel, get_wheel_distribution from pip._internal.models.link import Link from pip._internal.models.wheel import Wheel from pip._internal.operations.build.wheel import build_wheel_pep517 +from pip._internal.operations.build.wheel_editable import build_wheel_editable from pip._internal.operations.build.wheel_legacy import build_wheel_legacy from pip._internal.req.req_install import InstallRequirement from pip._internal.utils.logging import indent_log -from pip._internal.utils.misc import ensure_dir, hash_file, is_wheel_installed +from pip._internal.utils.misc import ensure_dir, hash_file from pip._internal.utils.setuptools_build import make_setuptools_clean_args from pip._internal.utils.subprocess import call_subprocess from pip._internal.utils.temp_dir import TempDirectory @@ -28,14 +29,12 @@ from pip._internal.vcs import vcs logger = logging.getLogger(__name__) -_egg_info_re = re.compile(r'([a-z0-9_.]+)-([a-z0-9_.!+-]+)', re.IGNORECASE) +_egg_info_re = re.compile(r"([a-z0-9_.]+)-([a-z0-9_.!+-]+)", re.IGNORECASE) -BinaryAllowedPredicate = Callable[[InstallRequirement], bool] BuildResult = Tuple[List[InstallRequirement], List[InstallRequirement]] -def _contains_egg_info(s): - # type: (str) -> bool +def _contains_egg_info(s: str) -> bool: """Determine whether the string looks like an egg_info. :param s: The string to parse. E.g. foo-2.1 @@ -44,11 +43,9 @@ def _contains_egg_info(s): def _should_build( - req, # type: InstallRequirement - need_wheel, # type: bool - check_binary_allowed, # type: BinaryAllowedPredicate -): - # type: (...) -> bool + req: InstallRequirement, + need_wheel: bool, +) -> bool: """Return whether an InstallRequirement should be built into a wheel.""" if req.constraint: # never build requirements that are merely constraints @@ -56,7 +53,8 @@ def _should_build( if req.is_wheel: if need_wheel: logger.info( - 'Skipping %s, due to already being wheel.', req.name, + "Skipping %s, due to already being wheel.", + req.name, ) return False @@ -67,53 +65,31 @@ def _should_build( # From this point, this concerns the pip install command only # (need_wheel=False). 
- if req.editable or not req.source_dir: - return False - - if req.use_pep517: - return True - - if not check_binary_allowed(req): - logger.info( - "Skipping wheel build for %s, due to binaries " - "being disabled for it.", req.name, - ) + if not req.source_dir: return False - if not is_wheel_installed(): - # we don't build legacy requirements if wheel is not installed - logger.info( - "Using legacy 'setup.py install' for %s, " - "since package 'wheel' is not installed.", req.name, - ) - return False + if req.editable: + # we only build PEP 660 editable requirements + return req.supports_pyproject_editable() return True def should_build_for_wheel_command( - req, # type: InstallRequirement -): - # type: (...) -> bool - return _should_build( - req, need_wheel=True, check_binary_allowed=_always_true - ) + req: InstallRequirement, +) -> bool: + return _should_build(req, need_wheel=True) def should_build_for_install_command( - req, # type: InstallRequirement - check_binary_allowed, # type: BinaryAllowedPredicate -): - # type: (...) -> bool - return _should_build( - req, need_wheel=False, check_binary_allowed=check_binary_allowed - ) + req: InstallRequirement, +) -> bool: + return _should_build(req, need_wheel=False) def _should_cache( - req, # type: InstallRequirement -): - # type: (...) -> Optional[bool] + req: InstallRequirement, +) -> Optional[bool]: """ Return whether a built InstallRequirement can be stored in the persistent wheel cache, assuming the wheel cache is available, and _should_build() @@ -144,10 +120,9 @@ def _should_cache( def _get_cache_dir( - req, # type: InstallRequirement - wheel_cache, # type: WheelCache -): - # type: (...) -> str + req: InstallRequirement, + wheel_cache: WheelCache, +) -> str: """Return the persistent or temporary cache directory where the built wheel need to be stored. """ @@ -160,13 +135,7 @@ def _get_cache_dir( return cache_dir -def _always_true(_): - # type: (Any) -> bool - return True - - -def _verify_one(req, wheel_path): - # type: (InstallRequirement, str) -> None +def _verify_one(req: InstallRequirement, wheel_path: str) -> None: canonical_name = canonicalize_name(req.name or "") w = Wheel(os.path.basename(wheel_path)) if canonicalize_name(w.name) != canonical_name: @@ -174,7 +143,7 @@ def _verify_one(req, wheel_path): "Wheel has unexpected file name: expected {!r}, " "got {!r}".format(canonical_name, w.name), ) - dist = get_wheel_distribution(wheel_path, canonical_name) + dist = get_wheel_distribution(FilesystemWheel(wheel_path), canonical_name) dist_verstr = str(dist.version) if canonicalize_version(dist_verstr) != canonicalize_version(w.version): raise InvalidWheelFilename( @@ -189,8 +158,7 @@ def _verify_one(req, wheel_path): except InvalidVersion: msg = f"Invalid Metadata-Version: {metadata_version_value}" raise UnsupportedWheel(msg) - if (metadata_version >= Version("1.2") - and not isinstance(dist.version, Version)): + if metadata_version >= Version("1.2") and not isinstance(dist.version, Version): raise UnsupportedWheel( "Metadata 1.2 mandates PEP 440 version, " "but {!r} is not".format(dist_verstr) @@ -198,47 +166,50 @@ def _verify_one(req, wheel_path): def _build_one( - req, # type: InstallRequirement - output_dir, # type: str - verify, # type: bool - build_options, # type: List[str] - global_options, # type: List[str] -): - # type: (...) -> Optional[str] + req: InstallRequirement, + output_dir: str, + verify: bool, + build_options: List[str], + global_options: List[str], + editable: bool, +) -> Optional[str]: """Build one wheel. 
:return: The filename of the built wheel, or None if the build failed. """ + artifact = "editable" if editable else "wheel" try: ensure_dir(output_dir) except OSError as e: logger.warning( - "Building wheel for %s failed: %s", - req.name, e, + "Building %s for %s failed: %s", + artifact, + req.name, + e, ) return None # Install build deps into temporary directory (PEP 518) with req.build_env: wheel_path = _build_one_inside_env( - req, output_dir, build_options, global_options + req, output_dir, build_options, global_options, editable ) if wheel_path and verify: try: _verify_one(req, wheel_path) except (InvalidWheelFilename, UnsupportedWheel) as e: - logger.warning("Built wheel for %s is invalid: %s", req.name, e) + logger.warning("Built %s for %s is invalid: %s", artifact, req.name, e) return None return wheel_path def _build_one_inside_env( - req, # type: InstallRequirement - output_dir, # type: str - build_options, # type: List[str] - global_options, # type: List[str] -): - # type: (...) -> Optional[str] + req: InstallRequirement, + output_dir: str, + build_options: List[str], + global_options: List[str], + editable: bool, +) -> Optional[str]: with TempDirectory(kind="wheel") as temp_dir: assert req.name if req.use_pep517: @@ -246,18 +217,26 @@ def _build_one_inside_env( assert req.pep517_backend if global_options: logger.warning( - 'Ignoring --global-option when building %s using PEP 517', req.name + "Ignoring --global-option when building %s using PEP 517", req.name ) if build_options: logger.warning( - 'Ignoring --build-option when building %s using PEP 517', req.name + "Ignoring --build-option when building %s using PEP 517", req.name + ) + if editable: + wheel_path = build_wheel_editable( + name=req.name, + backend=req.pep517_backend, + metadata_directory=req.metadata_directory, + tempd=temp_dir.path, + ) + else: + wheel_path = build_wheel_pep517( + name=req.name, + backend=req.pep517_backend, + metadata_directory=req.metadata_directory, + tempd=temp_dir.path, ) - wheel_path = build_wheel_pep517( - name=req.name, - backend=req.pep517_backend, - metadata_directory=req.metadata_directory, - tempd=temp_dir.path, - ) else: wheel_path = build_wheel_legacy( name=req.name, @@ -274,16 +253,20 @@ def _build_one_inside_env( try: wheel_hash, length = hash_file(wheel_path) shutil.move(wheel_path, dest_path) - logger.info('Created wheel for %s: ' - 'filename=%s size=%d sha256=%s', - req.name, wheel_name, length, - wheel_hash.hexdigest()) - logger.info('Stored in directory: %s', output_dir) + logger.info( + "Created wheel for %s: filename=%s size=%d sha256=%s", + req.name, + wheel_name, + length, + wheel_hash.hexdigest(), + ) + logger.info("Stored in directory: %s", output_dir) return dest_path except Exception as e: logger.warning( "Building wheel for %s failed: %s", - req.name, e, + req.name, + e, ) # Ignore return, we can't do anything else useful. 
if not req.use_pep517: @@ -291,30 +274,30 @@ def _build_one_inside_env( return None -def _clean_one_legacy(req, global_options): - # type: (InstallRequirement, List[str]) -> bool +def _clean_one_legacy(req: InstallRequirement, global_options: List[str]) -> bool: clean_args = make_setuptools_clean_args( req.setup_py_path, global_options=global_options, ) - logger.info('Running setup.py clean for %s', req.name) + logger.info("Running setup.py clean for %s", req.name) try: - call_subprocess(clean_args, cwd=req.source_dir) + call_subprocess( + clean_args, command_desc="python setup.py clean", cwd=req.source_dir + ) return True except Exception: - logger.error('Failed cleaning build dir for %s', req.name) + logger.error("Failed cleaning build dir for %s", req.name) return False def build( - requirements, # type: Iterable[InstallRequirement] - wheel_cache, # type: WheelCache - verify, # type: bool - build_options, # type: List[str] - global_options, # type: List[str] -): - # type: (...) -> BuildResult + requirements: Iterable[InstallRequirement], + wheel_cache: WheelCache, + verify: bool, + build_options: List[str], + global_options: List[str], +) -> BuildResult: """Build wheels. :return: The list of InstallRequirement that succeeded to build and @@ -325,18 +308,30 @@ def build( # Build the wheels. logger.info( - 'Building wheels for collected packages: %s', - ', '.join(req.name for req in requirements), # type: ignore + "Building wheels for collected packages: %s", + ", ".join(req.name for req in requirements), # type: ignore ) with indent_log(): build_successes, build_failures = [], [] for req in requirements: + assert req.name cache_dir = _get_cache_dir(req, wheel_cache) wheel_file = _build_one( - req, cache_dir, verify, build_options, global_options + req, + cache_dir, + verify, + build_options, + global_options, + req.editable and req.permit_editable_wheels, ) if wheel_file: + # Record the download origin in the cache + if req.download_info is not None: + # download_info is guaranteed to be set because when we build an + # InstallRequirement it has been through the preparer before, but + # let's be cautious. + wheel_cache.record_download_origin(cache_dir, req.download_info) # Update the link for this. req.link = Link(path_to_url(wheel_file)) req.local_file_path = req.link.file_path @@ -348,13 +343,13 @@ def build( # notify success/failure if build_successes: logger.info( - 'Successfully built %s', - ' '.join([req.name for req in build_successes]), # type: ignore + "Successfully built %s", + " ".join([req.name for req in build_successes]), # type: ignore ) if build_failures: logger.info( - 'Failed to build %s', - ' '.join([req.name for req in build_failures]), # type: ignore + "Failed to build %s", + " ".join([req.name for req in build_failures]), # type: ignore ) # Return a list of requirements that failed to build return build_successes, build_failures diff --git a/env/Lib/site-packages/pip/_vendor/__init__.py b/env/Lib/site-packages/pip/_vendor/__init__.py index a10ecd6074a179e05fde99e38278fc97d6f41653..b22f7abb93b9d7aeee50829b35746aaa3f9f5feb 100644 --- a/env/Lib/site-packages/pip/_vendor/__init__.py +++ b/env/Lib/site-packages/pip/_vendor/__init__.py @@ -58,13 +58,11 @@ if DEBUNDLED: sys.path[:] = glob.glob(os.path.join(WHEEL_DIR, "*.whl")) + sys.path # Actually alias all of our vendored dependencies. 
- vendored("appdirs") vendored("cachecontrol") vendored("certifi") vendored("colorama") vendored("distlib") vendored("distro") - vendored("html5lib") vendored("six") vendored("six.moves") vendored("six.moves.urllib") @@ -74,6 +72,7 @@ if DEBUNDLED: vendored("packaging.specifiers") vendored("pep517") vendored("pkg_resources") + vendored("platformdirs") vendored("progress") vendored("requests") vendored("requests.exceptions") @@ -106,8 +105,16 @@ if DEBUNDLED: vendored("requests.packages.urllib3.util.timeout") vendored("requests.packages.urllib3.util.url") vendored("resolvelib") + vendored("rich") + vendored("rich.console") + vendored("rich.highlighter") + vendored("rich.logging") + vendored("rich.markup") + vendored("rich.progress") + vendored("rich.segment") + vendored("rich.style") + vendored("rich.text") + vendored("rich.traceback") vendored("tenacity") - vendored("toml") - vendored("toml.encoder") - vendored("toml.decoder") + vendored("tomli") vendored("urllib3") diff --git a/env/Lib/site-packages/pip/_vendor/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 44980fc6fc07a83d803c9c2c8f445fbf0a9293fb..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/__pycache__/appdirs.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/__pycache__/appdirs.cpython-39.pyc deleted file mode 100644 index 56bdeee96913eee10207643cdb43c03408645567..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/__pycache__/appdirs.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/__pycache__/distro.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/__pycache__/distro.cpython-39.pyc deleted file mode 100644 index 7a4216e49825f65191a088d7d1e2bd48755d1cea..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/__pycache__/distro.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/__pycache__/pyparsing.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/__pycache__/pyparsing.cpython-39.pyc deleted file mode 100644 index a88fe70a195b513e28b7a7a0cec5f1e40fe85bb9..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/__pycache__/pyparsing.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/__pycache__/six.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/__pycache__/six.cpython-39.pyc deleted file mode 100644 index fb7259e9dd481e461f0478555689e5f713599bca..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/__pycache__/six.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/appdirs.py b/env/Lib/site-packages/pip/_vendor/appdirs.py deleted file mode 100644 index 33a3b77410c02af114b01dd74b5de09f9e30562e..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/appdirs.py +++ /dev/null @@ -1,633 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# Copyright (c) 2005-2010 ActiveState Software Inc. -# Copyright (c) 2013 Eddy Petrișor - -"""Utilities for determining application-specific dirs. - -See <http://github.com/ActiveState/appdirs> for details and usage. 
-""" -# Dev Notes: -# - MSDN on where to store app data files: -# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120 -# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html -# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html - -__version__ = "1.4.4" -__version_info__ = tuple(int(segment) for segment in __version__.split(".")) - - -import sys -import os - -PY3 = sys.version_info[0] == 3 - -if PY3: - unicode = str - -if sys.platform.startswith('java'): - import platform - os_name = platform.java_ver()[3][0] - if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc. - system = 'win32' - elif os_name.startswith('Mac'): # "Mac OS X", etc. - system = 'darwin' - else: # "Linux", "SunOS", "FreeBSD", etc. - # Setting this to "linux2" is not ideal, but only Windows or Mac - # are actually checked for and the rest of the module expects - # *sys.platform* style strings. - system = 'linux2' -elif sys.platform == 'cli' and os.name == 'nt': - # Detect Windows in IronPython to match pip._internal.utils.compat.WINDOWS - # Discussion: <https://github.com/pypa/pip/pull/7501> - system = 'win32' -else: - system = sys.platform - - - -def user_data_dir(appname=None, appauthor=None, version=None, roaming=False): - r"""Return full path to the user-specific data dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be "<major>.<minor>". - Only applied when appname is present. - "roaming" (boolean, default False) can be set True to use the Windows - roaming appdata directory. That means that for users on a Windows - network setup for roaming profiles, this user data will be - sync'd on login. See - <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx> - for a discussion of issues. - - Typical user data directories are: - Mac OS X: ~/Library/Application Support/<AppName> # or ~/.config/<AppName>, if the other does not exist - Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined - Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName> - Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName> - Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName> - Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName> - - For Unix, we follow the XDG spec and support $XDG_DATA_HOME. - That means, by default "~/.local/share/<AppName>". 
- """ - if system == "win32": - if appauthor is None: - appauthor = appname - const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA" - path = os.path.normpath(_get_win_folder(const)) - if appname: - if appauthor is not False: - path = os.path.join(path, appauthor, appname) - else: - path = os.path.join(path, appname) - elif system == 'darwin': - path = os.path.expanduser('~/Library/Application Support/') - if appname: - path = os.path.join(path, appname) - else: - path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share")) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def site_data_dir(appname=None, appauthor=None, version=None, multipath=False): - r"""Return full path to the user-shared data dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be "<major>.<minor>". - Only applied when appname is present. - "multipath" is an optional parameter only applicable to *nix - which indicates that the entire list of data dirs should be - returned. By default, the first item from XDG_DATA_DIRS is - returned, or '/usr/local/share/<AppName>', - if XDG_DATA_DIRS is not set - - Typical site data directories are: - Mac OS X: /Library/Application Support/<AppName> - Unix: /usr/local/share/<AppName> or /usr/share/<AppName> - Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName> - Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) - Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7. - - For Unix, this is using the $XDG_DATA_DIRS[0] default. - - WARNING: Do not use this on Windows. See the Vista-Fail note above for why. - """ - if system == "win32": - if appauthor is None: - appauthor = appname - path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA")) - if appname: - if appauthor is not False: - path = os.path.join(path, appauthor, appname) - else: - path = os.path.join(path, appname) - elif system == 'darwin': - path = os.path.expanduser('/Library/Application Support') - if appname: - path = os.path.join(path, appname) - else: - # XDG default for $XDG_DATA_DIRS - # only first, if multipath is False - path = os.getenv('XDG_DATA_DIRS', - os.pathsep.join(['/usr/local/share', '/usr/share'])) - pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] - if appname: - if version: - appname = os.path.join(appname, version) - pathlist = [os.path.join(x, appname) for x in pathlist] - - if multipath: - path = os.pathsep.join(pathlist) - else: - path = pathlist[0] - return path - - if appname and version: - path = os.path.join(path, version) - return path - - -def user_config_dir(appname=None, appauthor=None, version=None, roaming=False): - r"""Return full path to the user-specific config dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. 
Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be "<major>.<minor>". - Only applied when appname is present. - "roaming" (boolean, default False) can be set True to use the Windows - roaming appdata directory. That means that for users on a Windows - network setup for roaming profiles, this user data will be - sync'd on login. See - <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx> - for a discussion of issues. - - Typical user config directories are: - Mac OS X: same as user_data_dir - Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined - Win *: same as user_data_dir - - For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME. - That means, by default "~/.config/<AppName>". - """ - if system in ["win32", "darwin"]: - path = user_data_dir(appname, appauthor, None, roaming) - else: - path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config")) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -# for the discussion regarding site_config_dir locations -# see <https://github.com/pypa/pip/issues/1733> -def site_config_dir(appname=None, appauthor=None, version=None, multipath=False): - r"""Return full path to the user-shared data dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be "<major>.<minor>". - Only applied when appname is present. - "multipath" is an optional parameter only applicable to *nix - which indicates that the entire list of config dirs should be - returned. By default, the first item from XDG_CONFIG_DIRS is - returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set - - Typical site config directories are: - Mac OS X: same as site_data_dir - Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in - $XDG_CONFIG_DIRS - Win *: same as site_data_dir - Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) - - For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False - - WARNING: Do not use this on Windows. See the Vista-Fail note above for why. 
- """ - if system in ["win32", "darwin"]: - path = site_data_dir(appname, appauthor) - if appname and version: - path = os.path.join(path, version) - else: - # XDG default for $XDG_CONFIG_DIRS (missing or empty) - # see <https://github.com/pypa/pip/pull/7501#discussion_r360624829> - # only first, if multipath is False - path = os.getenv('XDG_CONFIG_DIRS') or '/etc/xdg' - pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep) if x] - if appname: - if version: - appname = os.path.join(appname, version) - pathlist = [os.path.join(x, appname) for x in pathlist] - - if multipath: - path = os.pathsep.join(pathlist) - else: - path = pathlist[0] - return path - - -def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True): - r"""Return full path to the user-specific cache dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be "<major>.<minor>". - Only applied when appname is present. - "opinion" (boolean) can be False to disable the appending of - "Cache" to the base app data dir for Windows. See - discussion below. - - Typical user cache directories are: - Mac OS X: ~/Library/Caches/<AppName> - Unix: ~/.cache/<AppName> (XDG default) - Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache - Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache - - On Windows the only suggestion in the MSDN docs is that local settings go in - the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming - app data dir (the default returned by `user_data_dir` above). Apps typically - put cache data somewhere *under* the given dir here. Some examples: - ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache - ...\Acme\SuperApp\Cache\1.0 - OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value. - This can be disabled with the `opinion=False` option. - """ - if system == "win32": - if appauthor is None: - appauthor = appname - path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA")) - # When using Python 2, return paths as bytes on Windows like we do on - # other operating systems. See helper function docs for more details. - if not PY3 and isinstance(path, unicode): - path = _win_path_to_bytes(path) - if appname: - if appauthor is not False: - path = os.path.join(path, appauthor, appname) - else: - path = os.path.join(path, appname) - if opinion: - path = os.path.join(path, "Cache") - elif system == 'darwin': - path = os.path.expanduser('~/Library/Caches') - if appname: - path = os.path.join(path, appname) - else: - path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache')) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def user_state_dir(appname=None, appauthor=None, version=None, roaming=False): - r"""Return full path to the user-specific state dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. 
- "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be "<major>.<minor>". - Only applied when appname is present. - "roaming" (boolean, default False) can be set True to use the Windows - roaming appdata directory. That means that for users on a Windows - network setup for roaming profiles, this user data will be - sync'd on login. See - <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx> - for a discussion of issues. - - Typical user state directories are: - Mac OS X: same as user_data_dir - Unix: ~/.local/state/<AppName> # or in $XDG_STATE_HOME, if defined - Win *: same as user_data_dir - - For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state> - to extend the XDG spec and support $XDG_STATE_HOME. - - That means, by default "~/.local/state/<AppName>". - """ - if system in ["win32", "darwin"]: - path = user_data_dir(appname, appauthor, None, roaming) - else: - path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state")) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def user_log_dir(appname=None, appauthor=None, version=None, opinion=True): - r"""Return full path to the user-specific log dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be "<major>.<minor>". - Only applied when appname is present. - "opinion" (boolean) can be False to disable the appending of - "Logs" to the base app data dir for Windows, and "log" to the - base cache dir for Unix. See discussion below. - - Typical user log directories are: - Mac OS X: ~/Library/Logs/<AppName> - Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined - Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs - Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs - - On Windows the only suggestion in the MSDN docs is that local settings - go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in - examples of what some windows apps use for a logs dir.) - - OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA` - value for Windows and appends "log" to the user cache dir for Unix. - This can be disabled with the `opinion=False` option. 
- """ - if system == "darwin": - path = os.path.join( - os.path.expanduser('~/Library/Logs'), - appname) - elif system == "win32": - path = user_data_dir(appname, appauthor, version) - version = False - if opinion: - path = os.path.join(path, "Logs") - else: - path = user_cache_dir(appname, appauthor, version) - version = False - if opinion: - path = os.path.join(path, "log") - if appname and version: - path = os.path.join(path, version) - return path - - -class AppDirs(object): - """Convenience wrapper for getting application dirs.""" - def __init__(self, appname=None, appauthor=None, version=None, - roaming=False, multipath=False): - self.appname = appname - self.appauthor = appauthor - self.version = version - self.roaming = roaming - self.multipath = multipath - - @property - def user_data_dir(self): - return user_data_dir(self.appname, self.appauthor, - version=self.version, roaming=self.roaming) - - @property - def site_data_dir(self): - return site_data_dir(self.appname, self.appauthor, - version=self.version, multipath=self.multipath) - - @property - def user_config_dir(self): - return user_config_dir(self.appname, self.appauthor, - version=self.version, roaming=self.roaming) - - @property - def site_config_dir(self): - return site_config_dir(self.appname, self.appauthor, - version=self.version, multipath=self.multipath) - - @property - def user_cache_dir(self): - return user_cache_dir(self.appname, self.appauthor, - version=self.version) - - @property - def user_state_dir(self): - return user_state_dir(self.appname, self.appauthor, - version=self.version) - - @property - def user_log_dir(self): - return user_log_dir(self.appname, self.appauthor, - version=self.version) - - -#---- internal support stuff - -def _get_win_folder_from_registry(csidl_name): - """This is a fallback technique at best. I'm not sure if using the - registry for this guarantees us the correct answer for all CSIDL_* - names. - """ - if PY3: - import winreg as _winreg - else: - import _winreg - - shell_folder_name = { - "CSIDL_APPDATA": "AppData", - "CSIDL_COMMON_APPDATA": "Common AppData", - "CSIDL_LOCAL_APPDATA": "Local AppData", - }[csidl_name] - - key = _winreg.OpenKey( - _winreg.HKEY_CURRENT_USER, - r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders" - ) - dir, type = _winreg.QueryValueEx(key, shell_folder_name) - return dir - - -def _get_win_folder_with_pywin32(csidl_name): - from win32com.shell import shellcon, shell - dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0) - # Try to make this a unicode path because SHGetFolderPath does - # not return unicode strings when there is unicode data in the - # path. - try: - dir = unicode(dir) - - # Downgrade to short path name if have highbit chars. See - # <http://bugs.activestate.com/show_bug.cgi?id=85099>. - has_high_char = False - for c in dir: - if ord(c) > 255: - has_high_char = True - break - if has_high_char: - try: - import win32api - dir = win32api.GetShortPathName(dir) - except ImportError: - pass - except UnicodeError: - pass - return dir - - -def _get_win_folder_with_ctypes(csidl_name): - import ctypes - - csidl_const = { - "CSIDL_APPDATA": 26, - "CSIDL_COMMON_APPDATA": 35, - "CSIDL_LOCAL_APPDATA": 28, - }[csidl_name] - - buf = ctypes.create_unicode_buffer(1024) - ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf) - - # Downgrade to short path name if have highbit chars. See - # <http://bugs.activestate.com/show_bug.cgi?id=85099>. 
- has_high_char = False - for c in buf: - if ord(c) > 255: - has_high_char = True - break - if has_high_char: - buf2 = ctypes.create_unicode_buffer(1024) - if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024): - buf = buf2 - - return buf.value - -def _get_win_folder_with_jna(csidl_name): - import array - from com.sun import jna - from com.sun.jna.platform import win32 - - buf_size = win32.WinDef.MAX_PATH * 2 - buf = array.zeros('c', buf_size) - shell = win32.Shell32.INSTANCE - shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf) - dir = jna.Native.toString(buf.tostring()).rstrip("\0") - - # Downgrade to short path name if have highbit chars. See - # <http://bugs.activestate.com/show_bug.cgi?id=85099>. - has_high_char = False - for c in dir: - if ord(c) > 255: - has_high_char = True - break - if has_high_char: - buf = array.zeros('c', buf_size) - kernel = win32.Kernel32.INSTANCE - if kernel.GetShortPathName(dir, buf, buf_size): - dir = jna.Native.toString(buf.tostring()).rstrip("\0") - - return dir - -if system == "win32": - try: - from ctypes import windll - _get_win_folder = _get_win_folder_with_ctypes - except ImportError: - try: - import com.sun.jna - _get_win_folder = _get_win_folder_with_jna - except ImportError: - _get_win_folder = _get_win_folder_from_registry - - -def _win_path_to_bytes(path): - """Encode Windows paths to bytes. Only used on Python 2. - - Motivation is to be consistent with other operating systems where paths - are also returned as bytes. This avoids problems mixing bytes and Unicode - elsewhere in the codebase. For more details and discussion see - <https://github.com/pypa/pip/issues/3463>. - - If encoding using ASCII and MBCS fails, return the original Unicode path. - """ - for encoding in ('ASCII', 'MBCS'): - try: - return path.encode(encoding) - except (UnicodeEncodeError, LookupError): - pass - return path - - -#---- self test code - -if __name__ == "__main__": - appname = "MyApp" - appauthor = "MyCompany" - - props = ("user_data_dir", - "user_config_dir", - "user_cache_dir", - "user_state_dir", - "user_log_dir", - "site_data_dir", - "site_config_dir") - - print("-- app dirs %s --" % __version__) - - print("-- app dirs (with optional 'version')") - dirs = AppDirs(appname, appauthor, version="1.0") - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) - - print("\n-- app dirs (without optional 'version')") - dirs = AppDirs(appname, appauthor) - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) - - print("\n-- app dirs (without optional 'appauthor')") - dirs = AppDirs(appname) - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) - - print("\n-- app dirs (with disabled 'appauthor')") - dirs = AppDirs(appname, appauthor=False) - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/__init__.py b/env/Lib/site-packages/pip/_vendor/cachecontrol/__init__.py index a1bbbbe3bff592b2bc761772dc6974af4afb7035..f631ae6df4747b808cac7c03b38e3e1d48bea00b 100644 --- a/env/Lib/site-packages/pip/_vendor/cachecontrol/__init__.py +++ b/env/Lib/site-packages/pip/_vendor/cachecontrol/__init__.py @@ -1,11 +1,18 @@ +# SPDX-FileCopyrightText: 2015 Eric Larson +# +# SPDX-License-Identifier: Apache-2.0 + """CacheControl import Interface. Make it easy to import from cachecontrol without long namespaces. 
""" __author__ = "Eric Larson" __email__ = "eric@ionrock.org" -__version__ = "0.12.6" +__version__ = "0.12.11" from .wrapper import CacheControl from .adapter import CacheControlAdapter from .controller import CacheController + +import logging +logging.getLogger(__name__).addHandler(logging.NullHandler()) diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 7f8709b8f861c7d704fc88181acca9d7432a8e66..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/_cmd.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/_cmd.cpython-39.pyc deleted file mode 100644 index babf4480c0b72b44b0c67208f79c3c7b208b1077..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/_cmd.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/adapter.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/adapter.cpython-39.pyc deleted file mode 100644 index e0f1373c6316e56a3453fc290c3eb991f10fb2d4..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/adapter.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/cache.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/cache.cpython-39.pyc deleted file mode 100644 index f228f240b7d9bbfe7aaf212595e452a17ad68afa..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/cache.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/compat.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/compat.cpython-39.pyc deleted file mode 100644 index b1058db3c293c3e418b78f03d0ada27255a6d3d4..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/compat.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/controller.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/controller.cpython-39.pyc deleted file mode 100644 index 1c44791a4bc4d1bb4f5cabcdb2c8dd7d69809daa..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/controller.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/filewrapper.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/filewrapper.cpython-39.pyc deleted file mode 100644 index 0841947fd645d6befe015bf398f76dabd0eccaea..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/filewrapper.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/heuristics.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/heuristics.cpython-39.pyc deleted file mode 100644 index 762c6978cd7a53faa5a511a76f8c8560e5f813ef..0000000000000000000000000000000000000000 Binary files 
a/env/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/heuristics.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/serialize.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/serialize.cpython-39.pyc deleted file mode 100644 index 2ade01220d8e1573af27b8b9303741cea233c271..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/serialize.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/wrapper.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/wrapper.cpython-39.pyc deleted file mode 100644 index 2f99daf818dc703fd376949b4720b6abe42f8dd3..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/cachecontrol/__pycache__/wrapper.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/_cmd.py b/env/Lib/site-packages/pip/_vendor/cachecontrol/_cmd.py index f1e0ad94a1ce2cd06bf381845fafbecbd9846f88..4266b5ee92a24b5e0ef65689a1b94a98bb4a9b56 100644 --- a/env/Lib/site-packages/pip/_vendor/cachecontrol/_cmd.py +++ b/env/Lib/site-packages/pip/_vendor/cachecontrol/_cmd.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: 2015 Eric Larson +# +# SPDX-License-Identifier: Apache-2.0 + import logging from pip._vendor import requests diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/adapter.py b/env/Lib/site-packages/pip/_vendor/cachecontrol/adapter.py index 815650e81fe23c35523e66ff73e468ada1fd507d..94c75e1a05b47922945c5233e90e9f936b108b66 100644 --- a/env/Lib/site-packages/pip/_vendor/cachecontrol/adapter.py +++ b/env/Lib/site-packages/pip/_vendor/cachecontrol/adapter.py @@ -1,16 +1,20 @@ +# SPDX-FileCopyrightText: 2015 Eric Larson +# +# SPDX-License-Identifier: Apache-2.0 + import types import functools import zlib from pip._vendor.requests.adapters import HTTPAdapter -from .controller import CacheController +from .controller import CacheController, PERMANENT_REDIRECT_STATUSES from .cache import DictCache from .filewrapper import CallbackFileWrapper class CacheControlAdapter(HTTPAdapter): - invalidating_methods = {"PUT", "DELETE"} + invalidating_methods = {"PUT", "PATCH", "DELETE"} def __init__( self, @@ -93,7 +97,7 @@ class CacheControlAdapter(HTTPAdapter): response = cached_response # We always cache the 301 responses - elif response.status == 301: + elif int(response.status) in PERMANENT_REDIRECT_STATUSES: self.controller.cache_response(request, response) else: # Wrap the response file with a wrapper that will cache the diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/cache.py b/env/Lib/site-packages/pip/_vendor/cachecontrol/cache.py index 94e07732d91fdf38f4559da04db4221d26553d40..2a965f595ff0756002e2a2c79da551fa8c8fff25 100644 --- a/env/Lib/site-packages/pip/_vendor/cachecontrol/cache.py +++ b/env/Lib/site-packages/pip/_vendor/cachecontrol/cache.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: 2015 Eric Larson +# +# SPDX-License-Identifier: Apache-2.0 + """ The cache object API for implementing caches. The default is a thread safe in-memory dictionary. 
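The cache.py hunk below makes two API changes: `BaseCache.set()` gains an optional `expires` argument, and a new `SeparateBodyBaseCache` splits body storage out of the metadata blob. A toy in-memory implementation of that contract, to show the expected call pattern (this class is invented for illustration; it is not part of cachecontrol):

    # Sketch: a minimal object satisfying the SeparateBodyBaseCache contract
    # introduced below (metadata via set/get, body via set_body/get_body).
    import io

    class ToySeparateBodyCache:
        def __init__(self):
            self.meta = {}
            self.bodies = {}

        def set(self, key, value, expires=None):  # expires accepted, ignored here
            self.meta[key] = value

        def get(self, key):
            return self.meta.get(key)

        def set_body(self, key, body):
            self.bodies[key] = bytes(body)

        def get_body(self, key):
            data = self.bodies.get(key)
            # The contract returns a file-like object, or None on a miss.
            return io.BytesIO(data) if data is not None else None

    cache = ToySeparateBodyCache()
    cache.set("k", b"serialized-metadata")
    cache.set_body("k", b"response body")
    assert cache.get_body("k").read() == b"response body"

Keeping the body out of the serialized metadata is what lets the file-based variant further down stream large downloads to disk instead of holding them in memory.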
@@ -10,7 +14,7 @@ class BaseCache(object): def get(self, key): raise NotImplementedError() - def set(self, key, value): + def set(self, key, value, expires=None): raise NotImplementedError() def delete(self, key): @@ -29,7 +33,7 @@ class DictCache(BaseCache): def get(self, key): return self.data.get(key, None) - def set(self, key, value): + def set(self, key, value, expires=None): with self.lock: self.data.update({key: value}) @@ -37,3 +41,25 @@ class DictCache(BaseCache): with self.lock: if key in self.data: self.data.pop(key) + + +class SeparateBodyBaseCache(BaseCache): + """ + In this variant, the body is not stored mixed in with the metadata, but is + passed in (as a bytes-like object) in a separate call to ``set_body()``. + + That is, the expected interaction pattern is:: + + cache.set(key, serialized_metadata) + cache.set_body(key) + + Similarly, the body should be loaded separately via ``get_body()``. + """ + def set_body(self, key, body): + raise NotImplementedError() + + def get_body(self, key): + """ + Return the body as file-like object. + """ + raise NotImplementedError() diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/__init__.py b/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/__init__.py index 0e1658fa5e5e498bcfa84702ed1a53dfcec071ff..37827291fb5a76d7ef9a7a3a695710b2074ca09a 100644 --- a/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/__init__.py +++ b/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/__init__.py @@ -1,2 +1,9 @@ -from .file_cache import FileCache # noqa -from .redis_cache import RedisCache # noqa +# SPDX-FileCopyrightText: 2015 Eric Larson +# +# SPDX-License-Identifier: Apache-2.0 + +from .file_cache import FileCache, SeparateBodyFileCache +from .redis_cache import RedisCache + + +__all__ = ["FileCache", "SeparateBodyFileCache", "RedisCache"] diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index c73e5810a5331f1466b4a28580b19a019833375f..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/file_cache.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/file_cache.cpython-39.pyc deleted file mode 100644 index 69ee4447ae818ae0a7b0a8a7e2a87e09b2cb0849..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/file_cache.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/redis_cache.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/redis_cache.cpython-39.pyc deleted file mode 100644 index 25d81532e984ec5fec1d5b61f04ded9f82bc250e..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/redis_cache.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py b/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py index 607b9452428f4343a1a017dfd230b12456060543..f1ddb2ebdf9eb702718fd31e09ff92b592da519f 100644 --- a/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py +++ 
+++ b/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py
@@ -1,8 +1,12 @@
+# SPDX-FileCopyrightText: 2015 Eric Larson
+#
+# SPDX-License-Identifier: Apache-2.0
+
 import hashlib
 import os
 from textwrap import dedent
 
-from ..cache import BaseCache
+from ..cache import BaseCache, SeparateBodyBaseCache
 from ..controller import CacheController
 
 try:
@@ -53,7 +57,8 @@ def _secure_open_write(filename, fmode):
         raise
 
 
-class FileCache(BaseCache):
+class _FileCacheMixin:
+    """Shared implementation for both FileCache variants."""
 
     def __init__(
         self,
@@ -114,22 +119,27 @@ class FileCache(BaseCache):
         except FileNotFoundError:
             return None
 
-    def set(self, key, value):
+    def set(self, key, value, expires=None):
         name = self._fn(key)
+        self._write(name, value)
+
+    def _write(self, path, data: bytes):
+        """
+        Safely write the data to the given path.
+        """
         # Make sure the directory exists
         try:
-            os.makedirs(os.path.dirname(name), self.dirmode)
+            os.makedirs(os.path.dirname(path), self.dirmode)
         except (IOError, OSError):
             pass
 
-        with self.lock_class(name) as lock:
+        with self.lock_class(path) as lock:
             # Write our actual file
             with _secure_open_write(lock.path, self.filemode) as fh:
-                fh.write(value)
+                fh.write(data)
 
-    def delete(self, key):
-        name = self._fn(key)
+    def _delete(self, key, suffix):
+        name = self._fn(key) + suffix
        if not self.forever:
             try:
                 os.remove(name)
@@ -137,6 +147,38 @@
                 pass
 
 
+class FileCache(_FileCacheMixin, BaseCache):
+    """
+    Traditional FileCache: body is stored in memory, so not suitable for large
+    downloads.
+    """
+
+    def delete(self, key):
+        self._delete(key, "")
+
+
+class SeparateBodyFileCache(_FileCacheMixin, SeparateBodyBaseCache):
+    """
+    Memory-efficient FileCache: body is stored in a separate file, reducing
+    peak memory usage.
+    """
+
+    def get_body(self, key):
+        name = self._fn(key) + ".body"
+        try:
+            return open(name, "rb")
+        except FileNotFoundError:
+            return None
+
+    def set_body(self, key, body):
+        name = self._fn(key) + ".body"
+        self._write(name, body)
+
+    def delete(self, key):
+        self._delete(key, "")
+        self._delete(key, ".body")
+
+
 def url_to_file_path(url, filecache):
     """Return the file cache path based on the URL.
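For context on the split above: FileCache keeps its old behaviour (the whole body passes through memory on every write), while the new SeparateBodyFileCache streams each body to a companion "<key>.body" file through set_body()/get_body(). A minimal usage sketch, assuming the stand-alone cachecontrol and requests distributions rather than pip's vendored copies (which are not meant to be imported directly); the URL and cache directory are illustrative:

    import requests
    from cachecontrol import CacheControl
    from cachecontrol.caches import FileCache, SeparateBodyFileCache

    # Classic layout: metadata and body are serialized together.
    small = CacheControl(requests.Session(), cache=FileCache(".web_cache"))

    # Separate-body layout: the body lands in its own "<key>.body" file,
    # keeping peak memory flat when caching large downloads.
    large = CacheControl(
        requests.Session(), cache=SeparateBodyFileCache(".web_cache")
    )

    resp = large.get("https://example.com/big.iso")  # hypothetical URL
    print(resp.status_code)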
diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py b/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py
index ed705ce7df63140ec54a8eee1ccae2b1dcd41950..2cba4b0708032d62b4c1278f99e5db87ed8d90fe 100644
--- a/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py
+++ b/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py
@@ -1,3 +1,7 @@
+# SPDX-FileCopyrightText: 2015 Eric Larson
+#
+# SPDX-License-Identifier: Apache-2.0
+
 from __future__ import division
 
 from datetime import datetime
@@ -15,9 +19,11 @@ class RedisCache(BaseCache):
     def set(self, key, value, expires=None):
         if not expires:
             self.conn.set(key, value)
-        else:
+        elif isinstance(expires, datetime):
             expires = expires - datetime.utcnow()
             self.conn.setex(key, int(expires.total_seconds()), value)
+        else:
+            self.conn.setex(key, expires, value)
 
     def delete(self, key):
         self.conn.delete(key)
diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/compat.py b/env/Lib/site-packages/pip/_vendor/cachecontrol/compat.py
index 33b5aed0a322d9dfdf35daffeb293bc56ff39c54..ccec9379dba2b03015ce123dd04a042f32431235 100644
--- a/env/Lib/site-packages/pip/_vendor/cachecontrol/compat.py
+++ b/env/Lib/site-packages/pip/_vendor/cachecontrol/compat.py
@@ -1,3 +1,7 @@
+# SPDX-FileCopyrightText: 2015 Eric Larson
+#
+# SPDX-License-Identifier: Apache-2.0
+
 try:
     from urllib.parse import urljoin
 except ImportError:
@@ -9,7 +13,6 @@ try:
 except ImportError:
     import pickle
 
-
 # Handle the case where the requests module has been patched to not have
 # urllib3 bundled as part of its source.
 try:
diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/controller.py b/env/Lib/site-packages/pip/_vendor/cachecontrol/controller.py
index dafe55ca70c40e2528c08c4ee718c59debb80af2..7f23529f1155cd3bbfde335ccdb7fc483b9d2d19 100644
--- a/env/Lib/site-packages/pip/_vendor/cachecontrol/controller.py
+++ b/env/Lib/site-packages/pip/_vendor/cachecontrol/controller.py
@@ -1,3 +1,7 @@
+# SPDX-FileCopyrightText: 2015 Eric Larson
+#
+# SPDX-License-Identifier: Apache-2.0
+
 """
 The httplib2 algorithms ported for use with requests.
 """
@@ -9,7 +13,7 @@ from email.utils import parsedate_tz
 
 from pip._vendor.requests.structures import CaseInsensitiveDict
 
-from .cache import DictCache
+from .cache import DictCache, SeparateBodyBaseCache
 from .serialize import Serializer
 
@@ -17,19 +21,20 @@ logger = logging.getLogger(__name__)
 
 URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")
 
+PERMANENT_REDIRECT_STATUSES = (301, 308)
+
 
 def parse_uri(uri):
     """Parses a URI using the regex given in Appendix B of RFC 3986.
 
-        (scheme, authority, path, query, fragment) = parse_uri(uri)
+    (scheme, authority, path, query, fragment) = parse_uri(uri)
     """
     groups = URI.match(uri).groups()
     return (groups[1], groups[3], groups[4], groups[6], groups[8])
 
 
 class CacheController(object):
-    """An interface to see if request should cached or not.
- """ + """An interface to see if request should cached or not.""" def __init__( self, cache=None, cache_etags=True, serializer=None, status_codes=None @@ -37,7 +42,7 @@ class CacheController(object): self.cache = DictCache() if cache is None else cache self.cache_etags = cache_etags self.serializer = serializer or Serializer() - self.cacheable_status_codes = status_codes or (200, 203, 300, 301) + self.cacheable_status_codes = status_codes or (200, 203, 300, 301, 308) @classmethod def _urlnorm(cls, uri): @@ -141,23 +146,29 @@ class CacheController(object): logger.debug("No cache entry available") return False + if isinstance(self.cache, SeparateBodyBaseCache): + body_file = self.cache.get_body(cache_url) + else: + body_file = None + # Check whether it can be deserialized - resp = self.serializer.loads(request, cache_data) + resp = self.serializer.loads(request, cache_data, body_file) if not resp: logger.warning("Cache entry deserialization failed, entry ignored") return False - # If we have a cached 301, return it immediately. We don't - # need to test our response for other headers b/c it is + # If we have a cached permanent redirect, return it immediately. We + # don't need to test our response for other headers b/c it is # intrinsically "cacheable" as it is Permanent. + # # See: # https://tools.ietf.org/html/rfc7231#section-6.4.2 # # Client can try to refresh the value by repeating the request # with cache busting headers as usual (ie no-cache). - if resp.status == 301: + if int(resp.status) in PERMANENT_REDIRECT_STATUSES: msg = ( - 'Returning cached "301 Moved Permanently" response ' + "Returning cached permanent redirect response " "(ignoring date and etag information)" ) logger.debug(msg) @@ -244,6 +255,26 @@ class CacheController(object): return new_headers + def _cache_set(self, cache_url, request, response, body=None, expires_time=None): + """ + Store the data in the cache. + """ + if isinstance(self.cache, SeparateBodyBaseCache): + # We pass in the body separately; just put a placeholder empty + # string in the metadata. + self.cache.set( + cache_url, + self.serializer.dumps(request, response, b""), + expires=expires_time, + ) + self.cache.set_body(cache_url, body) + else: + self.cache.set( + cache_url, + self.serializer.dumps(request, response, body), + expires=expires_time, + ) + def cache_response(self, request, response, body=None, status_codes=None): """ Algorithm for caching requests. 
@@ -261,6 +292,11 @@ class CacheController(object):
 
         response_headers = CaseInsensitiveDict(response.headers)
 
+        if "date" in response_headers:
+            date = calendar.timegm(parsedate_tz(response_headers["date"]))
+        else:
+            date = 0
+
         # If we've been given a body, our response has a Content-Length, that
         # Content-Length is valid then we can check to see if the body we've
         # been given matches the expected size, and if it doesn't we'll just
@@ -304,35 +340,62 @@
 
         # If we've been given an etag, then keep the response
         if self.cache_etags and "etag" in response_headers:
+            expires_time = 0
+            if response_headers.get("expires"):
+                expires = parsedate_tz(response_headers["expires"])
+                if expires is not None:
+                    expires_time = calendar.timegm(expires) - date
+
+            expires_time = max(expires_time, 14 * 86400)
+
+            logger.debug("etag object cached for {0} seconds".format(expires_time))
             logger.debug("Caching due to etag")
-            self.cache.set(
-                cache_url, self.serializer.dumps(request, response, body=body)
-            )
+            self._cache_set(cache_url, request, response, body, expires_time)
 
-        # Add to the cache any 301s. We do this before looking that
-        # the Date headers.
-        elif response.status == 301:
-            logger.debug("Caching permanant redirect")
-            self.cache.set(cache_url, self.serializer.dumps(request, response))
+        # Add to the cache any permanent redirects. We do this before looking
+        # at the Date headers.
+        elif int(response.status) in PERMANENT_REDIRECT_STATUSES:
+            logger.debug("Caching permanent redirect")
+            self._cache_set(cache_url, request, response, b"")
 
         # Add to the cache if the response headers demand it. If there
         # is no date header then we can't do anything about expiring
         # the cache.
         elif "date" in response_headers:
+            date = calendar.timegm(parsedate_tz(response_headers["date"]))
             # cache when there is a max-age > 0
             if "max-age" in cc and cc["max-age"] > 0:
                 logger.debug("Caching b/c date exists and max-age > 0")
-                self.cache.set(
-                    cache_url, self.serializer.dumps(request, response, body=body)
+                expires_time = cc["max-age"]
+                self._cache_set(
+                    cache_url,
+                    request,
+                    response,
+                    body,
+                    expires_time,
                 )
 
             # If the request can expire, it means we should cache it
             # in the meantime.
             elif "expires" in response_headers:
                 if response_headers["expires"]:
-                    logger.debug("Caching b/c of expires header")
-                    self.cache.set(
-                        cache_url, self.serializer.dumps(request, response, body=body)
+                    expires = parsedate_tz(response_headers["expires"])
+                    if expires is not None:
+                        expires_time = calendar.timegm(expires) - date
+                    else:
+                        expires_time = None
+
+                    logger.debug(
expires in {0} seconds".format( + expires_time + ) + ) + self._cache_set( + cache_url, + request, + response, + body, + expires_time, ) def update_cached_response(self, request, response): @@ -371,6 +434,6 @@ class CacheController(object): cached_response.status = 200 # update our cache - self.cache.set(cache_url, self.serializer.dumps(request, cached_response)) + self._cache_set(cache_url, request, cached_response) return cached_response diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/filewrapper.py b/env/Lib/site-packages/pip/_vendor/cachecontrol/filewrapper.py index 30ed4c5a62a99906ab662a8acfba2ab75e82af2e..f5ed5f6f6ec0eae90a9f48753622b2b5ee5d4a4f 100644 --- a/env/Lib/site-packages/pip/_vendor/cachecontrol/filewrapper.py +++ b/env/Lib/site-packages/pip/_vendor/cachecontrol/filewrapper.py @@ -1,4 +1,9 @@ -from io import BytesIO +# SPDX-FileCopyrightText: 2015 Eric Larson +# +# SPDX-License-Identifier: Apache-2.0 + +from tempfile import NamedTemporaryFile +import mmap class CallbackFileWrapper(object): @@ -11,10 +16,17 @@ class CallbackFileWrapper(object): This class uses members with a double underscore (__) leading prefix so as not to accidentally shadow an attribute. + + The data is stored in a temporary file until it is all available. As long + as the temporary files directory is disk-based (sometimes it's a + memory-backed-``tmpfs`` on Linux), data will be unloaded to disk if memory + pressure is high. For small files the disk usually won't be used at all, + it'll all be in the filesystem memory cache, so there should be no + performance impact. """ def __init__(self, fp, callback): - self.__buf = BytesIO() + self.__buf = NamedTemporaryFile("rb+", delete=True) self.__fp = fp self.__callback = callback @@ -49,7 +61,19 @@ class CallbackFileWrapper(object): def _close(self): if self.__callback: - self.__callback(self.__buf.getvalue()) + if self.__buf.tell() == 0: + # Empty file: + result = b"" + else: + # Return the data without actually loading it into memory, + # relying on Python's buffer API and mmap(). mmap() just gives + # a view directly into the filesystem's memory cache, so it + # doesn't result in duplicate memory use. + self.__buf.seek(0, 0) + result = memoryview( + mmap.mmap(self.__buf.fileno(), 0, access=mmap.ACCESS_READ) + ) + self.__callback(result) # We assign this to None here, because otherwise we can get into # really tricky problems where the CPython interpreter dead locks @@ -58,9 +82,16 @@ class CallbackFileWrapper(object): # and allows the garbage collector to do it's thing normally. self.__callback = None + # Closing the temporary file releases memory and frees disk space. + # Important when caching big files. + self.__buf.close() + def read(self, amt=None): data = self.__fp.read(amt) - self.__buf.write(data) + if data: + # We may be dealing with b'', a sign that things are over: + # it's passed e.g. after we've already closed self.__buf. 
+            self.__buf.write(data)
 
         if self.__is_fp_closed():
             self._close()
diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/heuristics.py b/env/Lib/site-packages/pip/_vendor/cachecontrol/heuristics.py
index 6c0e9790d5d0764f2ca005ae955d644bc2098d75..ebe4a96f589474f6f441858de2bb961c5e473c6d 100644
--- a/env/Lib/site-packages/pip/_vendor/cachecontrol/heuristics.py
+++ b/env/Lib/site-packages/pip/_vendor/cachecontrol/heuristics.py
@@ -1,3 +1,7 @@
+# SPDX-FileCopyrightText: 2015 Eric Larson
+#
+# SPDX-License-Identifier: Apache-2.0
+
 import calendar
 import time
 
diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/serialize.py b/env/Lib/site-packages/pip/_vendor/cachecontrol/serialize.py
index 3b6ec2de1c130e5539d5227b26894e10e77e6c44..7fe1a3e33a3adbfd9ad1126a22d7175154ebc200 100644
--- a/env/Lib/site-packages/pip/_vendor/cachecontrol/serialize.py
+++ b/env/Lib/site-packages/pip/_vendor/cachecontrol/serialize.py
@@ -1,3 +1,7 @@
+# SPDX-FileCopyrightText: 2015 Eric Larson
+#
+# SPDX-License-Identifier: Apache-2.0
+
 import base64
 import io
 import json
@@ -17,24 +21,18 @@ def _b64_decode_str(s):
     return _b64_decode_bytes(s).decode("utf8")
 
 
-class Serializer(object):
+_default_body_read = object()
+
 
+class Serializer(object):
     def dumps(self, request, response, body=None):
         response_headers = CaseInsensitiveDict(response.headers)
 
         if body is None:
+            # When a body isn't passed in, we'll read the response. We
+            # also update the response with a new file handler to be
+            # sure it acts as though it was never read.
             body = response.read(decode_content=False)
-
-            # NOTE: 99% sure this is dead code. I'm only leaving it
-            #       here b/c I don't have a test yet to prove
-            #       it. Basically, before using
-            #       `cachecontrol.filewrapper.CallbackFileWrapper`,
-            #       this made an effort to reset the file handle. The
-            #       `CallbackFileWrapper` short circuits this code by
-            #       setting the body as the content is consumed, the
-            #       result being a `body` argument is *always* passed
-            #       into cache_response, and in turn,
-            #       `Serializer.dump`.
             response._fp = io.BytesIO(body)
 
         # NOTE: This is all a bit weird, but it's really important that on
@@ -46,7 +44,7 @@
         #       enough to have msgpack know the difference.
         data = {
             u"response": {
-                u"body": body,
+                u"body": body,  # Empty bytestring if body is stored separately
                 u"headers": dict(
                     (text_type(k), text_type(v)) for k, v in response.headers.items()
                 ),
@@ -71,7 +69,7 @@
 
         return b",".join([b"cc=4", msgpack.dumps(data, use_bin_type=True)])
 
-    def loads(self, request, data):
+    def loads(self, request, data, body_file=None):
         # Short circuit if we've been given an empty set of data
         if not data:
             return
@@ -94,14 +92,14 @@
 
         # Dispatch to the actual load method for the given version
         try:
-            return getattr(self, "_loads_v{}".format(ver))(request, data)
+            return getattr(self, "_loads_v{}".format(ver))(request, data, body_file)
         except AttributeError:
             # This is a version we don't have a loads function for, so we'll
             # just treat it as a miss and return None
             return
 
-    def prepare_response(self, request, cached):
+    def prepare_response(self, request, cached, body_file=None):
         """Verify our vary headers match and construct a real urllib3
         HTTPResponse object.
""" @@ -127,7 +125,10 @@ class Serializer(object): cached["response"]["headers"] = headers try: - body = io.BytesIO(body_raw) + if body_file is None: + body = io.BytesIO(body_raw) + else: + body = body_file except TypeError: # This can happen if cachecontrol serialized to v1 format (pickle) # using Python 2. A Python 2 str(byte string) will be unpickled as @@ -139,21 +140,22 @@ class Serializer(object): return HTTPResponse(body=body, preload_content=False, **cached["response"]) - def _loads_v0(self, request, data): + def _loads_v0(self, request, data, body_file=None): # The original legacy cache data. This doesn't contain enough # information to construct everything we need, so we'll treat this as # a miss. return - def _loads_v1(self, request, data): + def _loads_v1(self, request, data, body_file=None): try: cached = pickle.loads(data) except ValueError: return - return self.prepare_response(request, cached) + return self.prepare_response(request, cached, body_file) - def _loads_v2(self, request, data): + def _loads_v2(self, request, data, body_file=None): + assert body_file is None try: cached = json.loads(zlib.decompress(data).decode("utf8")) except (ValueError, zlib.error): @@ -171,18 +173,18 @@ class Serializer(object): for k, v in cached["vary"].items() ) - return self.prepare_response(request, cached) + return self.prepare_response(request, cached, body_file) - def _loads_v3(self, request, data): + def _loads_v3(self, request, data, body_file): # Due to Python 2 encoding issues, it's impossible to know for sure # exactly how to load v3 entries, thus we'll treat these as a miss so # that they get rewritten out as v4 entries. return - def _loads_v4(self, request, data): + def _loads_v4(self, request, data, body_file=None): try: cached = msgpack.loads(data, raw=False) except ValueError: return - return self.prepare_response(request, cached) + return self.prepare_response(request, cached, body_file) diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/wrapper.py b/env/Lib/site-packages/pip/_vendor/cachecontrol/wrapper.py index d8e6fc6a9e3bbe8b9b3c032298deb08e764d32f8..b6ee7f2039801c9792dfe6e473843fb0a4bc4a5b 100644 --- a/env/Lib/site-packages/pip/_vendor/cachecontrol/wrapper.py +++ b/env/Lib/site-packages/pip/_vendor/cachecontrol/wrapper.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: 2015 Eric Larson +# +# SPDX-License-Identifier: Apache-2.0 + from .adapter import CacheControlAdapter from .cache import DictCache diff --git a/env/Lib/site-packages/pip/_vendor/certifi/__init__.py b/env/Lib/site-packages/pip/_vendor/certifi/__init__.py index 17aaf900bdafc6e09392f741f0ee19ea228d651b..705f416d6b06ce5f51b3ff47c49d078e93c6f034 100644 --- a/env/Lib/site-packages/pip/_vendor/certifi/__init__.py +++ b/env/Lib/site-packages/pip/_vendor/certifi/__init__.py @@ -1,3 +1,4 @@ from .core import contents, where -__version__ = "2020.12.05" +__all__ = ["contents", "where"] +__version__ = "2023.05.07" diff --git a/env/Lib/site-packages/pip/_vendor/certifi/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/certifi/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 99d32824883247cc407f8e383caf487b5f3a287f..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/certifi/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/certifi/__pycache__/__main__.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/certifi/__pycache__/__main__.cpython-39.pyc deleted file mode 100644 index 
4612f777771face6c86a29b2157064620795bdcb..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/certifi/__pycache__/__main__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/certifi/__pycache__/core.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/certifi/__pycache__/core.cpython-39.pyc deleted file mode 100644 index 61150cb275e196c9b6cc95dfacb202ace573d423..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/certifi/__pycache__/core.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/certifi/cacert.pem b/env/Lib/site-packages/pip/_vendor/certifi/cacert.pem index c9459dc85d1ffe628c4475552b3e8062af9083a5..5183934bb755cb4c761c4608229b3e8372dc2c3e 100644 --- a/env/Lib/site-packages/pip/_vendor/certifi/cacert.pem +++ b/env/Lib/site-packages/pip/_vendor/certifi/cacert.pem @@ -28,36 +28,6 @@ DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A== -----END CERTIFICATE----- -# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2 -# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2 -# Label: "GlobalSign Root CA - R2" -# Serial: 4835703278459682885658125 -# MD5 Fingerprint: 94:14:77:7e:3e:5e:fd:8f:30:bd:41:b0:cf:e7:d0:30 -# SHA1 Fingerprint: 75:e0:ab:b6:13:85:12:27:1c:04:f8:5f:dd:de:38:e4:b7:24:2e:fe -# SHA256 Fingerprint: ca:42:dd:41:74:5f:d0:b8:1e:b9:02:36:2c:f9:d8:bf:71:9d:a1:bd:1b:1e:fc:94:6f:5b:4c:99:f4:2c:1b:9e ------BEGIN CERTIFICATE----- -MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4G -A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNp -Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1 -MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMjETMBEG -A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI -hvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6ErPL -v4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8 -eoLrvozps6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklq -tTleiDTsvHgMCJiEbKjNS7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzd -C9XZzPnqJworc5HGnRusyMvo4KD0L5CLTfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pa -zq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6CygPCm48CAwEAAaOBnDCB -mTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUm+IH -V2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5n -bG9iYWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG -3lm0mi3f3BmGLjANBgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4Gs -J0/WwbgcQ3izDJr86iw8bmEbTUsp9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO -291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu01yiPqFbQfXf5WRDLenVOavS -ot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG79G+dwfCMNYxd -AfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7 -TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg== ------END CERTIFICATE----- - # Issuer: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited # Subject: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. 
(limits liab.)/(c) 1999 Entrust.net Limited # Label: "Entrust.net Premium 2048 Secure Server CA" @@ -188,48 +158,6 @@ l2D4kF501KKaU73yqWjgom7C12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3 smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg== -----END CERTIFICATE----- -# Issuer: CN=QuoVadis Root Certification Authority O=QuoVadis Limited OU=Root Certification Authority -# Subject: CN=QuoVadis Root Certification Authority O=QuoVadis Limited OU=Root Certification Authority -# Label: "QuoVadis Root CA" -# Serial: 985026699 -# MD5 Fingerprint: 27:de:36:fe:72:b7:00:03:00:9d:f4:f0:1e:6c:04:24 -# SHA1 Fingerprint: de:3f:40:bd:50:93:d3:9b:6c:60:f6:da:bc:07:62:01:00:89:76:c9 -# SHA256 Fingerprint: a4:5e:de:3b:bb:f0:9c:8a:e1:5c:72:ef:c0:72:68:d6:93:a2:1c:99:6f:d5:1e:67:ca:07:94:60:fd:6d:88:73 ------BEGIN CERTIFICATE----- -MIIF0DCCBLigAwIBAgIEOrZQizANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJC -TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDElMCMGA1UECxMcUm9vdCBDZXJ0 -aWZpY2F0aW9uIEF1dGhvcml0eTEuMCwGA1UEAxMlUXVvVmFkaXMgUm9vdCBDZXJ0 -aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMTAzMTkxODMzMzNaFw0yMTAzMTcxODMz -MzNaMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMSUw -IwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYDVQQDEyVR -dW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG -9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv2G1lVO6V/z68mcLOhrfEYBklbTRvM16z/Yp -li4kVEAkOPcahdxYTMukJ0KX0J+DisPkBgNbAKVRHnAEdOLB1Dqr1607BxgFjv2D -rOpm2RgbaIr1VxqYuvXtdj182d6UajtLF8HVj71lODqV0D1VNk7feVcxKh7YWWVJ -WCCYfqtffp/p1k3sg3Spx2zY7ilKhSoGFPlU5tPaZQeLYzcS19Dsw3sgQUSj7cug -F+FxZc4dZjH3dgEZyH0DWLaVSR2mEiboxgx24ONmy+pdpibu5cxfvWenAScOospU -xbF6lR1xHkopigPcakXBpBlebzbNw6Kwt/5cOOJSvPhEQ+aQuwIDAQABo4ICUjCC -Ak4wPQYIKwYBBQUHAQEEMTAvMC0GCCsGAQUFBzABhiFodHRwczovL29jc3AucXVv -dmFkaXNvZmZzaG9yZS5jb20wDwYDVR0TAQH/BAUwAwEB/zCCARoGA1UdIASCAREw -ggENMIIBCQYJKwYBBAG+WAABMIH7MIHUBggrBgEFBQcCAjCBxxqBxFJlbGlhbmNl -IG9uIHRoZSBRdW9WYWRpcyBSb290IENlcnRpZmljYXRlIGJ5IGFueSBwYXJ0eSBh -c3N1bWVzIGFjY2VwdGFuY2Ugb2YgdGhlIHRoZW4gYXBwbGljYWJsZSBzdGFuZGFy -ZCB0ZXJtcyBhbmQgY29uZGl0aW9ucyBvZiB1c2UsIGNlcnRpZmljYXRpb24gcHJh -Y3RpY2VzLCBhbmQgdGhlIFF1b1ZhZGlzIENlcnRpZmljYXRlIFBvbGljeS4wIgYI -KwYBBQUHAgEWFmh0dHA6Ly93d3cucXVvdmFkaXMuYm0wHQYDVR0OBBYEFItLbe3T -KbkGGew5Oanwl4Rqy+/fMIGuBgNVHSMEgaYwgaOAFItLbe3TKbkGGew5Oanwl4Rq -y+/foYGEpIGBMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1p -dGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYD -VQQDEyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggQ6tlCL -MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAitQUtf70mpKnGdSk -fnIYj9lofFIk3WdvOXrEql494liwTXCYhGHoG+NpGA7O+0dQoE7/8CQfvbLO9Sf8 -7C9TqnN7Az10buYWnuulLsS/VidQK2K6vkscPFVcQR0kvoIgR13VRH56FmjffU1R -cHhXHTMe/QKZnAzNCgVPx7uOpHX6Sm2xgI4JVrmcGmD+XcHXetwReNDWXcG31a0y -mQM6isxUJTkxgXsTIlG6Rmyhu576BGxJJnSP0nPrzDCi5upZIof4l/UO/erMkqQW -xFIY6iHOsfHmhIHluqmGKPJDWl0Snawe2ajlCmqnf6CHKc/yiU3U7MXi5nrQNiOK -SnQ2+Q== ------END CERTIFICATE----- - # Issuer: CN=QuoVadis Root CA 2 O=QuoVadis Limited # Subject: CN=QuoVadis Root CA 2 O=QuoVadis Limited # Label: "QuoVadis Root CA 2" @@ -345,33 +273,6 @@ JRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ6rBK+1YWc26sTfcioU+tHXot RSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAiFL39vmwLAw== -----END CERTIFICATE----- -# Issuer: CN=Sonera Class2 CA O=Sonera -# Subject: CN=Sonera Class2 CA O=Sonera -# Label: "Sonera Class 2 Root CA" -# Serial: 29 -# MD5 Fingerprint: a3:ec:75:0f:2e:88:df:fa:48:01:4e:0b:5c:48:6f:fb -# SHA1 Fingerprint: 37:f7:6d:e6:07:7c:90:c5:b1:3e:93:1a:b7:41:10:b4:f2:e4:9a:27 -# SHA256 Fingerprint: 
79:08:b4:03:14:c1:38:10:0b:51:8d:07:35:80:7f:fb:fc:f8:51:8a:00:95:33:71:05:ba:38:6b:15:3d:d9:27 ------BEGIN CERTIFICATE----- -MIIDIDCCAgigAwIBAgIBHTANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEP -MA0GA1UEChMGU29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MyIENBMB4XDTAx -MDQwNjA3Mjk0MFoXDTIxMDQwNjA3Mjk0MFowOTELMAkGA1UEBhMCRkkxDzANBgNV -BAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJhIENsYXNzMiBDQTCCASIwDQYJKoZI -hvcNAQEBBQADggEPADCCAQoCggEBAJAXSjWdyvANlsdE+hY3/Ei9vX+ALTU74W+o -Z6m/AxxNjG8yR9VBaKQTBME1DJqEQ/xcHf+Js+gXGM2RX/uJ4+q/Tl18GybTdXnt -5oTjV+WtKcT0OijnpXuENmmz/V52vaMtmdOQTiMofRhj8VQ7Jp12W5dCsv+u8E7s -3TmVToMGf+dJQMjFAbJUWmYdPfz56TwKnoG4cPABi+QjVHzIrviQHgCWctRUz2Ej -vOr7nQKV0ba5cTppCD8PtOFCx4j1P5iop7oc4HFx71hXgVB6XGt0Rg6DA5jDjqhu -8nYybieDwnPz3BjotJPqdURrBGAgcVeHnfO+oJAjPYok4doh28MCAwEAAaMzMDEw -DwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQISqCqWITTXjwwCwYDVR0PBAQDAgEG -MA0GCSqGSIb3DQEBBQUAA4IBAQBazof5FnIVV0sd2ZvnoiYw7JNn39Yt0jSv9zil -zqsWuasvfDXLrNAPtEwr/IDva4yRXzZ299uzGxnq9LIR/WFxRL8oszodv7ND6J+/ -3DEIcbCdjdY0RzKQxmUk96BKfARzjzlvF4xytb1LyHr4e4PDKE6cCepnP7JnBBvD -FNr450kkkdAdavphOe9r5yF1BgfYErQhIHBCcYHaPJo2vqZbDWpsmh+Re/n570K6 -Tk6ezAyNlNzZRZxe7EJQY670XcSxEtzKO6gunRRaBXW37Ndj4ro1tgQIkejanZz2 -ZrUYrAqmVCY0M9IbwdR/GjqOC6oybtv8TyWf2TLHllpwrN9M ------END CERTIFICATE----- - # Issuer: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com # Subject: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com # Label: "XRamp Global CA Root" @@ -560,34 +461,6 @@ vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep +OkuE6N36B9K -----END CERTIFICATE----- -# Issuer: CN=DST Root CA X3 O=Digital Signature Trust Co. -# Subject: CN=DST Root CA X3 O=Digital Signature Trust Co. -# Label: "DST Root CA X3" -# Serial: 91299735575339953335919266965803778155 -# MD5 Fingerprint: 41:03:52:dc:0f:f7:50:1b:16:f0:02:8e:ba:6f:45:c5 -# SHA1 Fingerprint: da:c9:02:4f:54:d8:f6:df:94:93:5f:b1:73:26:38:ca:6a:d7:7c:13 -# SHA256 Fingerprint: 06:87:26:03:31:a7:24:03:d9:09:f1:05:e6:9b:cf:0d:32:e1:bd:24:93:ff:c6:d9:20:6d:11:bc:d6:77:07:39 ------BEGIN CERTIFICATE----- -MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/ -MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT -DkRTVCBSb290IENBIFgzMB4XDTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVow -PzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMRcwFQYDVQQD -Ew5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB -AN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmTrE4O -rz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEq -OLl5CjH9UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9b -xiqKqy69cK3FCxolkHRyxXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw -7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40dutolucbY38EVAjqr2m7xPi71XAicPNaD -aeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNV -HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQMA0GCSqG -SIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69 -ikugdB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXr -AvHRAosZy5Q6XkjEGB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZz -R8srzJmwN0jP41ZL9c8PDHIyh8bwRLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5 -JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubSfZGL+T0yjWW06XyxV3bqxbYo -Ob8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ ------END CERTIFICATE----- - # Issuer: CN=SwissSign Gold CA - G2 O=SwissSign AG # Subject: CN=SwissSign Gold CA - G2 O=SwissSign AG # Label: "SwissSign Gold CA - G2" @@ -763,37 +636,6 @@ BA6+C4OmF4O5MBKgxTMVBbkN+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IB ZQ== -----END 
CERTIFICATE----- -# Issuer: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C. -# Subject: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C. -# Label: "Network Solutions Certificate Authority" -# Serial: 116697915152937497490437556386812487904 -# MD5 Fingerprint: d3:f3:a6:16:c0:fa:6b:1d:59:b1:2d:96:4d:0e:11:2e -# SHA1 Fingerprint: 74:f8:a3:c3:ef:e7:b3:90:06:4b:83:90:3c:21:64:60:20:e5:df:ce -# SHA256 Fingerprint: 15:f0:ba:00:a3:ac:7a:f3:ac:88:4c:07:2b:10:11:a0:77:bd:77:c0:97:f4:01:64:b2:f8:59:8a:bd:83:86:0c ------BEGIN CERTIFICATE----- -MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBi -MQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu -MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3Jp -dHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMxMjM1OTU5WjBiMQswCQYDVQQGEwJV -UzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydO -ZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0GCSqG -SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwz -c7MEL7xxjOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPP -OCwGJgl6cvf6UDL4wpPTaaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rl -mGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXTcrA/vGp97Eh/jcOrqnErU2lBUzS1sLnF -BgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc/Qzpf14Dl847ABSHJ3A4 -qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMBAAGjgZcw -gZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIB -BjAPBgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwu -bmV0c29sc3NsLmNvbS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3Jp -dHkuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc8 -6fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q4LqILPxFzBiwmZVRDuwduIj/ -h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/GGUsyfJj4akH -/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv -wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHN -pGxlaKFJdlxDydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey ------END CERTIFICATE----- - # Issuer: CN=COMODO ECC Certification Authority O=COMODO CA Limited # Subject: CN=COMODO ECC Certification Authority O=COMODO CA Limited # Label: "COMODO ECC Certification Authority" @@ -848,36 +690,6 @@ t0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg== -----END CERTIFICATE----- -# Issuer: CN=Cybertrust Global Root O=Cybertrust, Inc -# Subject: CN=Cybertrust Global Root O=Cybertrust, Inc -# Label: "Cybertrust Global Root" -# Serial: 4835703278459682877484360 -# MD5 Fingerprint: 72:e4:4a:87:e3:69:40:80:77:ea:bc:e3:f4:ff:f0:e1 -# SHA1 Fingerprint: 5f:43:e5:b1:bf:f8:78:8c:ac:1c:c7:ca:4a:9a:c6:22:2b:cc:34:c6 -# SHA256 Fingerprint: 96:0a:df:00:63:e9:63:56:75:0c:29:65:dd:0a:08:67:da:0b:9c:bd:6e:77:71:4a:ea:fb:23:49:ab:39:3d:a3 ------BEGIN CERTIFICATE----- -MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYG -A1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2Jh -bCBSb290MB4XDTA2MTIxNTA4MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UE -ChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBS -b290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA+Mi8vRRQZhP/8NN5 -7CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW0ozS -J8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2y -HLtgwEZLAfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iP -t3sMpTjr3kfb1V05/Iin89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNz -FtApD0mpSPCzqrdsxacwOUBdrsTiXSZT8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAY -XSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/ 
-MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2MDSgMqAw -hi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3Js -MB8GA1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUA -A4IBAQBW7wojoFROlZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMj -Wqd8BfP9IjsO0QbE2zZMcwSO5bAi5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUx -XOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2hO0j9n0Hq0V+09+zv+mKts2o -omcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+TX3EJIrduPuoc -A06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW -WL1WMRJOEcgh4LMRkWXbtKaIOM5V ------END CERTIFICATE----- - # Issuer: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority # Subject: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority # Label: "ePKI Root Certification Authority" @@ -947,67 +759,6 @@ i/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7NzTogVZ96edhBiIL5VaZVDADlN 9u6wWk5JRFRYX0KD -----END CERTIFICATE----- -# Issuer: CN=GeoTrust Primary Certification Authority - G2 O=GeoTrust Inc. OU=(c) 2007 GeoTrust Inc. - For authorized use only -# Subject: CN=GeoTrust Primary Certification Authority - G2 O=GeoTrust Inc. OU=(c) 2007 GeoTrust Inc. - For authorized use only -# Label: "GeoTrust Primary Certification Authority - G2" -# Serial: 80682863203381065782177908751794619243 -# MD5 Fingerprint: 01:5e:d8:6b:bd:6f:3d:8e:a1:31:f8:12:e0:98:73:6a -# SHA1 Fingerprint: 8d:17:84:d5:37:f3:03:7d:ec:70:fe:57:8b:51:9a:99:e6:10:d7:b0 -# SHA256 Fingerprint: 5e:db:7a:c4:3b:82:a0:6a:87:61:e8:d7:be:49:79:eb:f2:61:1f:7d:d7:9b:f9:1c:1c:6b:56:6a:21:9e:d7:66 ------BEGIN CERTIFICATE----- -MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDEL -MAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChj -KSAyMDA3IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2 -MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0 -eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1OVowgZgxCzAJBgNV -BAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykgMjAw -NyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNV -BAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBH -MjB2MBAGByqGSM49AgEGBSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcL -So17VDs6bl8VAsBQps8lL33KSLjHUGMcKiEIfJo22Av+0SbFWDEwKCXzXV2juLal -tJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO -BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+EVXVMAoG -CCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGT -qQ7mndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBucz -rD6ogRLQy7rQkgu2npaqBA+K ------END CERTIFICATE----- - -# Issuer: CN=VeriSign Universal Root Certification Authority O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2008 VeriSign, Inc. - For authorized use only -# Subject: CN=VeriSign Universal Root Certification Authority O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2008 VeriSign, Inc. 
- For authorized use only -# Label: "VeriSign Universal Root Certification Authority" -# Serial: 85209574734084581917763752644031726877 -# MD5 Fingerprint: 8e:ad:b5:01:aa:4d:81:e4:8c:1d:d1:e1:14:00:95:19 -# SHA1 Fingerprint: 36:79:ca:35:66:87:72:30:4d:30:a5:fb:87:3b:0f:a7:7b:b7:0d:54 -# SHA256 Fingerprint: 23:99:56:11:27:a5:71:25:de:8c:ef:ea:61:0d:df:2f:a0:78:b5:c8:06:7f:4e:82:82:90:bf:b8:60:e8:4b:3c ------BEGIN CERTIFICATE----- -MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCB -vTELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL -ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJp -U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MTgwNgYDVQQDEy9W -ZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe -Fw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJVUzEX -MBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0 -IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9y -IGF1dGhvcml6ZWQgdXNlIG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNh -bCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF -AAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj1mCOkdeQmIN65lgZOIzF -9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGPMiJhgsWH -H26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+H -LL729fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN -/BMReYTtXlT2NJ8IAfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPT -rJ9VAMf2CGqUuV/c4DPxhGD5WycRtPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1Ud -EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0GCCsGAQUFBwEMBGEwX6FdoFsw -WTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2Oa8PPgGrUSBgs -exkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud -DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4 -sAPmLGd75JR3Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+ -seQxIcaBlVZaDrHC1LGmWazxY8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz -4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTxP/jgdFcrGJ2BtMQo2pSXpXDrrB2+ -BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+PwGZsY6rp2aQW9IHR -lRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4mJO3 -7M2CYfE45k+XmCpajQ== ------END CERTIFICATE----- - # Issuer: CN=NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny O=NetLock Kft. OU=Tan\xfas\xedtv\xe1nykiad\xf3k (Certification Services) # Subject: CN=NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny O=NetLock Kft. OU=Tan\xfas\xedtv\xe1nykiad\xf3k (Certification Services) # Label: "NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny" @@ -1243,105 +994,6 @@ naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1ZWrOZyGls QyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw== -----END CERTIFICATE----- -# Issuer: CN=Chambers of Commerce Root - 2008 O=AC Camerfirma S.A. -# Subject: CN=Chambers of Commerce Root - 2008 O=AC Camerfirma S.A. 
-# Label: "Chambers of Commerce Root - 2008" -# Serial: 11806822484801597146 -# MD5 Fingerprint: 5e:80:9e:84:5a:0e:65:0b:17:02:f3:55:18:2a:3e:d7 -# SHA1 Fingerprint: 78:6a:74:ac:76:ab:14:7f:9c:6a:30:50:ba:9e:a8:7e:fe:9a:ce:3c -# SHA256 Fingerprint: 06:3e:4a:fa:c4:91:df:d3:32:f3:08:9b:85:42:e9:46:17:d8:93:d7:fe:94:4e:10:a7:93:7e:e2:9d:96:93:c0 ------BEGIN CERTIFICATE----- -MIIHTzCCBTegAwIBAgIJAKPaQn6ksa7aMA0GCSqGSIb3DQEBBQUAMIGuMQswCQYD -VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0 -IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3 -MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xKTAnBgNVBAMTIENoYW1iZXJz -IG9mIENvbW1lcmNlIFJvb3QgLSAyMDA4MB4XDTA4MDgwMTEyMjk1MFoXDTM4MDcz -MTEyMjk1MFowga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNlZSBj -dXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29tL2FkZHJlc3MpMRIw -EAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVyZmlybWEgUy5BLjEp -MCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDgwggIiMA0G -CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCvAMtwNyuAWko6bHiUfaN/Gh/2NdW9 -28sNRHI+JrKQUrpjOyhYb6WzbZSm891kDFX29ufyIiKAXuFixrYp4YFs8r/lfTJq -VKAyGVn+H4vXPWCGhSRv4xGzdz4gljUha7MI2XAuZPeEklPWDrCQiorjh40G072Q -DuKZoRuGDtqaCrsLYVAGUvGef3bsyw/QHg3PmTA9HMRFEFis1tPo1+XqxQEHd9ZR -5gN/ikilTWh1uem8nk4ZcfUyS5xtYBkL+8ydddy/Js2Pk3g5eXNeJQ7KXOt3EgfL -ZEFHcpOrUMPrCXZkNNI5t3YRCQ12RcSprj1qr7V9ZS+UWBDsXHyvfuK2GNnQm05a -Sd+pZgvMPMZ4fKecHePOjlO+Bd5gD2vlGts/4+EhySnB8esHnFIbAURRPHsl18Tl -UlRdJQfKFiC4reRB7noI/plvg6aRArBsNlVq5331lubKgdaX8ZSD6e2wsWsSaR6s -+12pxZjptFtYer49okQ6Y1nUCyXeG0+95QGezdIp1Z8XGQpvvwyQ0wlf2eOKNcx5 -Wk0ZN5K3xMGtr/R5JJqyAQuxr1yW84Ay+1w9mPGgP0revq+ULtlVmhduYJ1jbLhj -ya6BXBg14JC7vjxPNyK5fuvPnnchpj04gftI2jE9K+OJ9dC1vX7gUMQSibMjmhAx -hduub+84Mxh2EQIDAQABo4IBbDCCAWgwEgYDVR0TAQH/BAgwBgEB/wIBDDAdBgNV -HQ4EFgQU+SSsD7K1+HnA+mCIG8TZTQKeFxkwgeMGA1UdIwSB2zCB2IAU+SSsD7K1 -+HnA+mCIG8TZTQKeFxmhgbSkgbEwga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpN -YWRyaWQgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29t -L2FkZHJlc3MpMRIwEAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVy -ZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAt -IDIwMDiCCQCj2kJ+pLGu2jAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRV -HSAAMCowKAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20w -DQYJKoZIhvcNAQEFBQADggIBAJASryI1wqM58C7e6bXpeHxIvj99RZJe6dqxGfwW -PJ+0W2aeaufDuV2I6A+tzyMP3iU6XsxPpcG1Lawk0lgH3qLPaYRgM+gQDROpI9CF -5Y57pp49chNyM/WqfcZjHwj0/gF/JM8rLFQJ3uIrbZLGOU8W6jx+ekbURWpGqOt1 -glanq6B8aBMz9p0w8G8nOSQjKpD9kCk18pPfNKXG9/jvjA9iSnyu0/VU+I22mlaH -FoI6M6taIgj3grrqLuBHmrS1RaMFO9ncLkVAO+rcf+g769HsJtg1pDDFOqxXnrN2 -pSB7+R5KBWIBpih1YJeSDW4+TTdDDZIVnBgizVGZoCkaPF+KMjNbMMeJL0eYD6MD -xvbxrN8y8NmBGuScvfaAFPDRLLmF9dijscilIeUcE5fuDr3fKanvNFNb0+RqE4QG -tjICxFKuItLcsiFCGtpA8CnJ7AoMXOLQusxI0zcKzBIKinmwPQN/aUv0NCB9szTq -jktk9T79syNnFQ0EuPAtwQlRPLJsFfClI9eDdOTlLsn+mCdCxqvGnrDQWzilm1De -fhiYtUU79nm06PcaewaD+9CL2rvHvRirCG88gGtAPxkZumWK5r7VXNM21+9AUiRg -OGcEMeyP84LG3rlV8zsxkVrctQgVrXYlCg17LofiDKYGvCYQbTed7N14jHyAxfDZ -d0jQ ------END CERTIFICATE----- - -# Issuer: CN=Global Chambersign Root - 2008 O=AC Camerfirma S.A. -# Subject: CN=Global Chambersign Root - 2008 O=AC Camerfirma S.A. 
-# Label: "Global Chambersign Root - 2008" -# Serial: 14541511773111788494 -# MD5 Fingerprint: 9e:80:ff:78:01:0c:2e:c1:36:bd:fe:96:90:6e:08:f3 -# SHA1 Fingerprint: 4a:bd:ee:ec:95:0d:35:9c:89:ae:c7:52:a1:2c:5b:29:f6:d6:aa:0c -# SHA256 Fingerprint: 13:63:35:43:93:34:a7:69:80:16:a0:d3:24:de:72:28:4e:07:9d:7b:52:20:bb:8f:bd:74:78:16:ee:be:ba:ca ------BEGIN CERTIFICATE----- -MIIHSTCCBTGgAwIBAgIJAMnN0+nVfSPOMA0GCSqGSIb3DQEBBQUAMIGsMQswCQYD -VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0 -IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3 -MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAlBgNVBAMTHkdsb2JhbCBD -aGFtYmVyc2lnbiBSb290IC0gMjAwODAeFw0wODA4MDExMjMxNDBaFw0zODA3MzEx -MjMxNDBaMIGsMQswCQYDVQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3Vy -cmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAG -A1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAl -BgNVBAMTHkdsb2JhbCBDaGFtYmVyc2lnbiBSb290IC0gMjAwODCCAiIwDQYJKoZI -hvcNAQEBBQADggIPADCCAgoCggIBAMDfVtPkOpt2RbQT2//BthmLN0EYlVJH6xed -KYiONWwGMi5HYvNJBL99RDaxccy9Wglz1dmFRP+RVyXfXjaOcNFccUMd2drvXNL7 -G706tcuto8xEpw2uIRU/uXpbknXYpBI4iRmKt4DS4jJvVpyR1ogQC7N0ZJJ0YPP2 -zxhPYLIj0Mc7zmFLmY/CDNBAspjcDahOo7kKrmCgrUVSY7pmvWjg+b4aqIG7HkF4 -ddPB/gBVsIdU6CeQNR1MM62X/JcumIS/LMmjv9GYERTtY/jKmIhYF5ntRQOXfjyG -HoiMvvKRhI9lNNgATH23MRdaKXoKGCQwoze1eqkBfSbW+Q6OWfH9GzO1KTsXO0G2 -Id3UwD2ln58fQ1DJu7xsepeY7s2MH/ucUa6LcL0nn3HAa6x9kGbo1106DbDVwo3V -yJ2dwW3Q0L9R5OP4wzg2rtandeavhENdk5IMagfeOx2YItaswTXbo6Al/3K1dh3e -beksZixShNBFks4c5eUzHdwHU1SjqoI7mjcv3N2gZOnm3b2u/GSFHTynyQbehP9r -6GsaPMWis0L7iwk+XwhSx2LE1AVxv8Rk5Pihg+g+EpuoHtQ2TS9x9o0o9oOpE9Jh -wZG7SMA0j0GMS0zbaRL/UJScIINZc+18ofLx/d33SdNDWKBWY8o9PeU1VlnpDsog -zCtLkykPAgMBAAGjggFqMIIBZjASBgNVHRMBAf8ECDAGAQH/AgEMMB0GA1UdDgQW -BBS5CcqcHtvTbDprru1U8VuTBjUuXjCB4QYDVR0jBIHZMIHWgBS5CcqcHtvTbDpr -ru1U8VuTBjUuXqGBsqSBrzCBrDELMAkGA1UEBhMCRVUxQzBBBgNVBAcTOk1hZHJp -ZCAoc2VlIGN1cnJlbnQgYWRkcmVzcyBhdCB3d3cuY2FtZXJmaXJtYS5jb20vYWRk -cmVzcykxEjAQBgNVBAUTCUE4Mjc0MzI4NzEbMBkGA1UEChMSQUMgQ2FtZXJmaXJt -YSBTLkEuMScwJQYDVQQDEx5HbG9iYWwgQ2hhbWJlcnNpZ24gUm9vdCAtIDIwMDiC -CQDJzdPp1X0jzjAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCow -KAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZI -hvcNAQEFBQADggIBAICIf3DekijZBZRG/5BXqfEv3xoNa/p8DhxJJHkn2EaqbylZ -UohwEurdPfWbU1Rv4WCiqAm57OtZfMY18dwY6fFn5a+6ReAJ3spED8IXDneRRXoz -X1+WLGiLwUePmJs9wOzL9dWCkoQ10b42OFZyMVtHLaoXpGNR6woBrX/sdZ7LoR/x -fxKxueRkf2fWIyr0uDldmOghp+G9PUIadJpwr2hsUF1Jz//7Dl3mLEfXgTpZALVz -a2Mg9jFFCDkO9HB+QHBaP9BrQql0PSgvAm11cpUJjUhjxsYjV5KTXjXBjfkK9yyd -Yhz2rXzdpjEetrHHfoUm+qRqtdpjMNHvkzeyZi99Bffnt0uYlDXA2TopwZ2yUDMd -SqlapskD7+3056huirRXhOukP9DuqqqHW2Pok+JrqNS4cnhrG+055F3Lm6qH1U9O -AP7Zap88MQ8oAgF9mOinsKJknnn4SPIVqczmyETrP3iZ8ntxPjzxmKfFGBI/5rso -M0LpRQp8bfKGeS/Fghl9CYl8slR2iK7ewfPM4W7bMdaTrpmg7yVqc5iJWzouE4ge -v8CSlDQb4ye3ix5vQv/n6TebUB0tovkC7stYWDpxvGjjqsGvHCgfotwjZT+B6q6Z -09gwzxMNTxXJhLynSC34MCN32EZLeW32jO06f2ARePTpm67VVMB0gNELQp/B ------END CERTIFICATE----- - # Issuer: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc. # Subject: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc. 
# Label: "Go Daddy Root Certificate Authority - G2" @@ -1640,78 +1292,6 @@ t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6qtnRGEmyR7jTV7JqR50S+kDFy SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03 -----END CERTIFICATE----- -# Issuer: CN=EC-ACC O=Agencia Catalana de Certificacio (NIF Q-0801176-I) OU=Serveis Publics de Certificacio/Vegeu https://www.catcert.net/verarrel (c)03/Jerarquia Entitats de Certificacio Catalanes -# Subject: CN=EC-ACC O=Agencia Catalana de Certificacio (NIF Q-0801176-I) OU=Serveis Publics de Certificacio/Vegeu https://www.catcert.net/verarrel (c)03/Jerarquia Entitats de Certificacio Catalanes -# Label: "EC-ACC" -# Serial: -23701579247955709139626555126524820479 -# MD5 Fingerprint: eb:f5:9d:29:0d:61:f9:42:1f:7c:c2:ba:6d:e3:15:09 -# SHA1 Fingerprint: 28:90:3a:63:5b:52:80:fa:e6:77:4c:0b:6d:a7:d6:ba:a6:4a:f2:e8 -# SHA256 Fingerprint: 88:49:7f:01:60:2f:31:54:24:6a:e2:8c:4d:5a:ef:10:f1:d8:7e:bb:76:62:6f:4a:e0:b7:f9:5b:a7:96:87:99 ------BEGIN CERTIFICATE----- -MIIFVjCCBD6gAwIBAgIQ7is969Qh3hSoYqwE893EATANBgkqhkiG9w0BAQUFADCB -8zELMAkGA1UEBhMCRVMxOzA5BgNVBAoTMkFnZW5jaWEgQ2F0YWxhbmEgZGUgQ2Vy -dGlmaWNhY2lvIChOSUYgUS0wODAxMTc2LUkpMSgwJgYDVQQLEx9TZXJ2ZWlzIFB1 -YmxpY3MgZGUgQ2VydGlmaWNhY2lvMTUwMwYDVQQLEyxWZWdldSBodHRwczovL3d3 -dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAoYykwMzE1MDMGA1UECxMsSmVyYXJxdWlh -IEVudGl0YXRzIGRlIENlcnRpZmljYWNpbyBDYXRhbGFuZXMxDzANBgNVBAMTBkVD -LUFDQzAeFw0wMzAxMDcyMzAwMDBaFw0zMTAxMDcyMjU5NTlaMIHzMQswCQYDVQQG -EwJFUzE7MDkGA1UEChMyQWdlbmNpYSBDYXRhbGFuYSBkZSBDZXJ0aWZpY2FjaW8g -KE5JRiBRLTA4MDExNzYtSSkxKDAmBgNVBAsTH1NlcnZlaXMgUHVibGljcyBkZSBD -ZXJ0aWZpY2FjaW8xNTAzBgNVBAsTLFZlZ2V1IGh0dHBzOi8vd3d3LmNhdGNlcnQu -bmV0L3ZlcmFycmVsIChjKTAzMTUwMwYDVQQLEyxKZXJhcnF1aWEgRW50aXRhdHMg -ZGUgQ2VydGlmaWNhY2lvIENhdGFsYW5lczEPMA0GA1UEAxMGRUMtQUNDMIIBIjAN -BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsyLHT+KXQpWIR4NA9h0X84NzJB5R -85iKw5K4/0CQBXCHYMkAqbWUZRkiFRfCQ2xmRJoNBD45b6VLeqpjt4pEndljkYRm -4CgPukLjbo73FCeTae6RDqNfDrHrZqJyTxIThmV6PttPB/SnCWDaOkKZx7J/sxaV -HMf5NLWUhdWZXqBIoH7nF2W4onW4HvPlQn2v7fOKSGRdghST2MDk/7NQcvJ29rNd -QlB50JQ+awwAvthrDk4q7D7SzIKiGGUzE3eeml0aE9jD2z3Il3rucO2n5nzbcc8t -lGLfbdb1OL4/pYUKGbio2Al1QnDE6u/LDsg0qBIimAy4E5S2S+zw0JDnJwIDAQAB -o4HjMIHgMB0GA1UdEQQWMBSBEmVjX2FjY0BjYXRjZXJ0Lm5ldDAPBgNVHRMBAf8E -BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUoMOLRKo3pUW/l4Ba0fF4 -opvpXY0wfwYDVR0gBHgwdjB0BgsrBgEEAfV4AQMBCjBlMCwGCCsGAQUFBwIBFiBo -dHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbDA1BggrBgEFBQcCAjApGidW -ZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAwDQYJKoZIhvcN -AQEFBQADggEBAKBIW4IB9k1IuDlVNZyAelOZ1Vr/sXE7zDkJlF7W2u++AVtd0x7Y -/X1PzaBB4DSTv8vihpw3kpBWHNzrKQXlxJ7HNd+KDM3FIUPpqojlNcAZQmNaAl6k -SBg6hW/cnbw/nZzBh7h6YQjpdwt/cKt63dmXLGQehb+8dJahw3oS7AwaboMMPOhy -Rp/7SNVel+axofjk70YllJyJ22k4vuxcDlbHZVHlUIiIv0LVKz3l+bqeLrPK9HOS -Agu+TGbrIP65y7WZf+a2E/rKS03Z7lNGBjvGTq2TWoF+bCpLagVFjPIhpDGQh2xl -nJ2lYJU6Un/10asIbvPuW/mIPX64b24D5EI= ------END CERTIFICATE----- - -# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. Authority -# Subject: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. 
Authority -# Label: "Hellenic Academic and Research Institutions RootCA 2011" -# Serial: 0 -# MD5 Fingerprint: 73:9f:4c:4b:73:5b:79:e9:fa:ba:1c:ef:6e:cb:d5:c9 -# SHA1 Fingerprint: fe:45:65:9b:79:03:5b:98:a1:61:b5:51:2e:ac:da:58:09:48:22:4d -# SHA256 Fingerprint: bc:10:4f:15:a4:8b:e7:09:dc:a5:42:a7:e1:d4:b9:df:6f:05:45:27:e8:02:ea:a9:2d:59:54:44:25:8a:fe:71 ------BEGIN CERTIFICATE----- -MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1Ix -RDBCBgNVBAoTO0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1 -dGlvbnMgQ2VydC4gQXV0aG9yaXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1p -YyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIFJvb3RDQSAyMDExMB4XDTExMTIw -NjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYTAkdSMUQwQgYDVQQK -EztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIENl -cnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl -c2VhcmNoIEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEB -BQADggEPADCCAQoCggEBAKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPz -dYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJ -fel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa71HFK9+WXesyHgLacEns -bgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u8yBRQlqD -75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSP -FEDH3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNV -HRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp -5dgTBCPuQSUwRwYDVR0eBEAwPqA8MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQu -b3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQub3JnMA0GCSqGSIb3DQEBBQUA -A4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVtXdMiKahsog2p -6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8 -TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7 -dIsXRSZMFpGD/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8Acys -Nnq/onN694/BtZqhFLKPM58N7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXI -l7WdmplNsDz4SgCbZN2fOUvRJ9e4 ------END CERTIFICATE----- - # Issuer: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967 # Subject: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967 # Label: "Actalis Authentication Root CA" @@ -1753,35 +1333,6 @@ LysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9vwGYT7JZVEc+NHt4bVaT LnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg== -----END CERTIFICATE----- -# Issuer: O=Trustis Limited OU=Trustis FPS Root CA -# Subject: O=Trustis Limited OU=Trustis FPS Root CA -# Label: "Trustis FPS Root CA" -# Serial: 36053640375399034304724988975563710553 -# MD5 Fingerprint: 30:c9:e7:1e:6b:e6:14:eb:65:b2:16:69:20:31:67:4d -# SHA1 Fingerprint: 3b:c0:38:0b:33:c3:f6:a6:0c:86:15:22:93:d9:df:f5:4b:81:c0:04 -# SHA256 Fingerprint: c1:b4:82:99:ab:a5:20:8f:e9:63:0a:ce:55:ca:68:a0:3e:da:5a:51:9c:88:02:a0:d3:a6:73:be:8f:8e:55:7d ------BEGIN CERTIFICATE----- -MIIDZzCCAk+gAwIBAgIQGx+ttiD5JNM2a/fH8YygWTANBgkqhkiG9w0BAQUFADBF -MQswCQYDVQQGEwJHQjEYMBYGA1UEChMPVHJ1c3RpcyBMaW1pdGVkMRwwGgYDVQQL -ExNUcnVzdGlzIEZQUyBSb290IENBMB4XDTAzMTIyMzEyMTQwNloXDTI0MDEyMTEx -MzY1NFowRTELMAkGA1UEBhMCR0IxGDAWBgNVBAoTD1RydXN0aXMgTGltaXRlZDEc -MBoGA1UECxMTVHJ1c3RpcyBGUFMgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD -ggEPADCCAQoCggEBAMVQe547NdDfxIzNjpvto8A2mfRC6qc+gIMPpqdZh8mQRUN+ -AOqGeSoDvT03mYlmt+WKVoaTnGhLaASMk5MCPjDSNzoiYYkchU59j9WvezX2fihH -iTHcDnlkH5nSW7r+f2C/revnPDgpai/lkQtV/+xvWNUtyd5MZnGPDNcE2gfmHhjj -vSkCqPoc4Vu5g6hBSLwacY3nYuUtsuvffM/bq1rKMfFMIvMFE/eC+XN5DL7XSxzA -0RU8k0Fk0ea+IxciAIleH2ulrG6nS4zto3Lmr2NNL4XSFDWaLk6M6jKYKIahkQlB -OrTh4/L68MkKokHdqeMDx4gVOxzUGpTXn2RZEm0CAwEAAaNTMFEwDwYDVR0TAQH/ -BAUwAwEB/zAfBgNVHSMEGDAWgBS6+nEleYtXQSUhhgtx67JkDoshZzAdBgNVHQ4E 
-FgQUuvpxJXmLV0ElIYYLceuyZA6LIWcwDQYJKoZIhvcNAQEFBQADggEBAH5Y//01 -GX2cGE+esCu8jowU/yyg2kdbw++BLa8F6nRIW/M+TgfHbcWzk88iNVy2P3UnXwmW -zaD+vkAMXBJV+JOCyinpXj9WV4s4NvdFGkwozZ5BuO1WTISkQMi4sKUraXAEasP4 -1BIy+Q7DsdwyhEQsb8tGD+pmQQ9P8Vilpg0ND2HepZ5dfWWhPBfnqFVO76DH7cZE -f1T1o+CP8HxVIo8ptoGj4W1OLBuAZ+ytIJ8MYmHVl/9D7S3B2l0pKoU/rGXuhg8F -jZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYliB6XzCGcKQEN -ZetX2fNXlrtIzYE= ------END CERTIFICATE----- - # Issuer: CN=Buypass Class 2 Root CA O=Buypass AS-983163327 # Subject: CN=Buypass Class 2 Root CA O=Buypass AS-983163327 # Label: "Buypass Class 2 Root CA" @@ -2600,27 +2151,6 @@ zzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbW RNZu9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg= -----END CERTIFICATE----- -# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4 -# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4 -# Label: "GlobalSign ECC Root CA - R4" -# Serial: 14367148294922964480859022125800977897474 -# MD5 Fingerprint: 20:f0:27:68:d1:7e:a0:9d:0e:e6:2a:ca:df:5c:89:8e -# SHA1 Fingerprint: 69:69:56:2e:40:80:f4:24:a1:e7:19:9f:14:ba:f3:ee:58:ab:6a:bb -# SHA256 Fingerprint: be:c9:49:11:c2:95:56:76:db:6c:0a:55:09:86:d7:6e:3b:a0:05:66:7c:44:2c:97:62:b4:fb:b7:73:de:22:8c ------BEGIN CERTIFICATE----- -MIIB4TCCAYegAwIBAgIRKjikHJYKBN5CsiilC+g0mAIwCgYIKoZIzj0EAwIwUDEk -MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI0MRMwEQYDVQQKEwpH -bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX -DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD -QSAtIFI0MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu -MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuMZ5049sJQ6fLjkZHAOkrprlOQcJ -FspjsbmG+IpXwVfOQvpzofdlQv8ewQCybnMO/8ch5RikqtlxP6jUuc6MHaNCMEAw -DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFSwe61F -uOJAf/sKbvu+M8k8o4TVMAoGCCqGSM49BAMCA0gAMEUCIQDckqGgE6bPA7DmxCGX -kPoUVy0D7O48027KqGx2vKLeuwIgJ6iFJzWbVsaj8kfSt24bAgAXqmemFZHe+pTs -ewv4n4Q= ------END CERTIFICATE----- - # Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5 # Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5 # Label: "GlobalSign ECC Root CA - R5" @@ -2643,86 +2173,6 @@ KoZIzj0EAwMDaAAwZQIxAOVpEslu28YxuglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg xwy8p2Fp8fc74SrL+SvzZpA3 -----END CERTIFICATE----- -# Issuer: CN=Staat der Nederlanden Root CA - G3 O=Staat der Nederlanden -# Subject: CN=Staat der Nederlanden Root CA - G3 O=Staat der Nederlanden -# Label: "Staat der Nederlanden Root CA - G3" -# Serial: 10003001 -# MD5 Fingerprint: 0b:46:67:07:db:10:2f:19:8c:35:50:60:d1:0b:f4:37 -# SHA1 Fingerprint: d8:eb:6b:41:51:92:59:e0:f3:e7:85:00:c0:3d:b6:88:97:c9:ee:fc -# SHA256 Fingerprint: 3c:4f:b0:b9:5a:b8:b3:00:32:f4:32:b8:6f:53:5f:e1:72:c1:85:d0:fd:39:86:58:37:cf:36:18:7f:a6:f4:28 ------BEGIN CERTIFICATE----- -MIIFdDCCA1ygAwIBAgIEAJiiOTANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO -TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh -dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEczMB4XDTEzMTExNDExMjg0MloX -DTI4MTExMzIzMDAwMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl -ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv -b3QgQ0EgLSBHMzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAL4yolQP -cPssXFnrbMSkUeiFKrPMSjTysF/zDsccPVMeiAho2G89rcKezIJnByeHaHE6n3WW -IkYFsO2tx1ueKt6c/DrGlaf1F2cY5y9JCAxcz+bMNO14+1Cx3Gsy8KL+tjzk7FqX -xz8ecAgwoNzFs21v0IJyEavSgWhZghe3eJJg+szeP4TrjTgzkApyI/o1zCZxMdFy -KJLZWyNtZrVtB0LrpjPOktvA9mxjeM3KTj215VKb8b475lRgsGYeCasH/lSJEULR -9yS6YHgamPfJEf0WwTUaVHXvQ9Plrk7O53vDxk5hUUurmkVLoR9BvUhTFXFkC4az 
-5S6+zqQbwSmEorXLCCN2QyIkHxcE1G6cxvx/K2Ya7Irl1s9N9WMJtxU51nus6+N8 -6U78dULI7ViVDAZCopz35HCz33JvWjdAidiFpNfxC95DGdRKWCyMijmev4SH8RY7 -Ngzp07TKbBlBUgmhHbBqv4LvcFEhMtwFdozL92TkA1CvjJFnq8Xy7ljY3r735zHP -bMk7ccHViLVlvMDoFxcHErVc0qsgk7TmgoNwNsXNo42ti+yjwUOH5kPiNL6VizXt -BznaqB16nzaeErAMZRKQFWDZJkBE41ZgpRDUajz9QdwOWke275dhdU/Z/seyHdTt -XUmzqWrLZoQT1Vyg3N9udwbRcXXIV2+vD3dbAgMBAAGjQjBAMA8GA1UdEwEB/wQF -MAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRUrfrHkleuyjWcLhL75Lpd -INyUVzANBgkqhkiG9w0BAQsFAAOCAgEAMJmdBTLIXg47mAE6iqTnB/d6+Oea31BD -U5cqPco8R5gu4RV78ZLzYdqQJRZlwJ9UXQ4DO1t3ApyEtg2YXzTdO2PCwyiBwpwp -LiniyMMB8jPqKqrMCQj3ZWfGzd/TtiunvczRDnBfuCPRy5FOCvTIeuXZYzbB1N/8 -Ipf3YF3qKS9Ysr1YvY2WTxB1v0h7PVGHoTx0IsL8B3+A3MSs/mrBcDCw6Y5p4ixp -gZQJut3+TcCDjJRYwEYgr5wfAvg1VUkvRtTA8KCWAg8zxXHzniN9lLf9OtMJgwYh -/WA9rjLA0u6NpvDntIJ8CsxwyXmA+P5M9zWEGYox+wrZ13+b8KKaa8MFSu1BYBQw -0aoRQm7TIwIEC8Zl3d1Sd9qBa7Ko+gE4uZbqKmxnl4mUnrzhVNXkanjvSr0rmj1A -fsbAddJu+2gw7OyLnflJNZoaLNmzlTnVHpL3prllL+U9bTpITAjc5CgSKL59NVzq -4BZ+Extq1z7XnvwtdbLBFNUjA9tbbws+eC8N3jONFrdI54OagQ97wUNNVQQXOEpR -1VmiiXTTn74eS9fGbbeIJG9gkaSChVtWQbzQRKtqE77RLFi3EjNYsjdj3BP1lB0/ -QFH1T/U67cjF68IeHRaVesd+QnGTbksVtzDfqu1XhUisHWrdOWnk4Xl4vs4Fv6EM -94B7IWcnMFk= ------END CERTIFICATE----- - -# Issuer: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden -# Subject: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden -# Label: "Staat der Nederlanden EV Root CA" -# Serial: 10000013 -# MD5 Fingerprint: fc:06:af:7b:e8:1a:f1:9a:b4:e8:d2:70:1f:c0:f5:ba -# SHA1 Fingerprint: 76:e2:7e:c1:4f:db:82:c1:c0:a6:75:b5:05:be:3d:29:b4:ed:db:bb -# SHA256 Fingerprint: 4d:24:91:41:4c:fe:95:67:46:ec:4c:ef:a6:cf:6f:72:e2:8a:13:29:43:2f:9d:8a:90:7a:c4:cb:5d:ad:c1:5a ------BEGIN CERTIFICATE----- -MIIFcDCCA1igAwIBAgIEAJiWjTANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJO -TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSkwJwYDVQQDDCBTdGFh -dCBkZXIgTmVkZXJsYW5kZW4gRVYgUm9vdCBDQTAeFw0xMDEyMDgxMTE5MjlaFw0y -MjEyMDgxMTEwMjhaMFgxCzAJBgNVBAYTAk5MMR4wHAYDVQQKDBVTdGFhdCBkZXIg -TmVkZXJsYW5kZW4xKTAnBgNVBAMMIFN0YWF0IGRlciBOZWRlcmxhbmRlbiBFViBS -b290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA48d+ifkkSzrS -M4M1LGns3Amk41GoJSt5uAg94JG6hIXGhaTK5skuU6TJJB79VWZxXSzFYGgEt9nC -UiY4iKTWO0Cmws0/zZiTs1QUWJZV1VD+hq2kY39ch/aO5ieSZxeSAgMs3NZmdO3d -Z//BYY1jTw+bbRcwJu+r0h8QoPnFfxZpgQNH7R5ojXKhTbImxrpsX23Wr9GxE46p -rfNeaXUmGD5BKyF/7otdBwadQ8QpCiv8Kj6GyzyDOvnJDdrFmeK8eEEzduG/L13l -pJhQDBXd4Pqcfzho0LKmeqfRMb1+ilgnQ7O6M5HTp5gVXJrm0w912fxBmJc+qiXb -j5IusHsMX/FjqTf5m3VpTCgmJdrV8hJwRVXj33NeN/UhbJCONVrJ0yPr08C+eKxC -KFhmpUZtcALXEPlLVPxdhkqHz3/KRawRWrUgUY0viEeXOcDPusBCAUCZSCELa6fS -/ZbV0b5GnUngC6agIk440ME8MLxwjyx1zNDFjFE7PZQIZCZhfbnDZY8UnCHQqv0X -cgOPvZuM5l5Tnrmd74K74bzickFbIZTTRTeU0d8JOV3nI6qaHcptqAqGhYqCvkIH -1vI4gnPah1vlPNOePqc7nvQDs/nxfRN0Av+7oeX6AHkcpmZBiFxgV6YuCcS6/ZrP -px9Aw7vMWgpVSzs4dlG4Y4uElBbmVvMCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB -/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFP6rAJCYniT8qcwaivsnuL8wbqg7 -MA0GCSqGSIb3DQEBCwUAA4ICAQDPdyxuVr5Os7aEAJSrR8kN0nbHhp8dB9O2tLsI -eK9p0gtJ3jPFrK3CiAJ9Brc1AsFgyb/E6JTe1NOpEyVa/m6irn0F3H3zbPB+po3u -2dfOWBfoqSmuc0iH55vKbimhZF8ZE/euBhD/UcabTVUlT5OZEAFTdfETzsemQUHS -v4ilf0X8rLiltTMMgsT7B/Zq5SWEXwbKwYY5EdtYzXc7LMJMD16a4/CrPmEbUCTC -wPTxGfARKbalGAKb12NMcIxHowNDXLldRqANb/9Zjr7dn3LDWyvfjFvO5QxGbJKy -CqNMVEIYFRIYvdr8unRu/8G2oGTYqV9Vrp9canaW2HNnh/tNf1zuacpzEPuKqf2e -vTY4SUmH9A4U8OmHuD+nT3pajnnUk+S7aFKErGzp85hwVXIy+TSrK0m1zSBi5Dp6 -Z2Orltxtrpfs/J92VoguZs9btsmksNcFuuEnL5O7Jiqik7Ab846+HUCjuTaPPoIa -Gl6I6lD4WeKDRikL40Rc4ZW2aZCaFG+XroHPaO+Zmr615+F/+PoTRxZMzG0IQOeL 
-eG9QgkRQP2YGiqtDhFZKDyAthg710tvSeopLzaXoTvFeJiUBWSOgftL2fiFX1ye8 -FVdMpEbB4IMeDExNH08GGeL5qPQ6gqGyeUN51q1veieQA6TqJIc/2b3Z6fJfUEkc -7uzXLg== ------END CERTIFICATE----- - # Issuer: CN=IdenTrust Commercial Root CA 1 O=IdenTrust # Subject: CN=IdenTrust Commercial Root CA 1 O=IdenTrust # Label: "IdenTrust Commercial Root CA 1" @@ -3330,116 +2780,6 @@ T8p+ck0LcIymSLumoRT2+1hEmRSuqguTaaApJUqlyyvdimYHFngVV3Eb7PVHhPOe MTd61X8kreS8/f3MboPoDKi3QWwH3b08hpcv0g== -----END CERTIFICATE----- -# Issuer: CN=TrustCor RootCert CA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority -# Subject: CN=TrustCor RootCert CA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority -# Label: "TrustCor RootCert CA-1" -# Serial: 15752444095811006489 -# MD5 Fingerprint: 6e:85:f1:dc:1a:00:d3:22:d5:b2:b2:ac:6b:37:05:45 -# SHA1 Fingerprint: ff:bd:cd:e7:82:c8:43:5e:3c:6f:26:86:5c:ca:a8:3a:45:5b:c3:0a -# SHA256 Fingerprint: d4:0e:9c:86:cd:8f:e4:68:c1:77:69:59:f4:9e:a7:74:fa:54:86:84:b6:c4:06:f3:90:92:61:f4:dc:e2:57:5c ------BEGIN CERTIFICATE----- -MIIEMDCCAxigAwIBAgIJANqb7HHzA7AZMA0GCSqGSIb3DQEBCwUAMIGkMQswCQYD -VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk -MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U -cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRydXN0Q29y -IFJvb3RDZXJ0IENBLTEwHhcNMTYwMjA0MTIzMjE2WhcNMjkxMjMxMTcyMzE2WjCB -pDELMAkGA1UEBhMCUEExDzANBgNVBAgMBlBhbmFtYTEUMBIGA1UEBwwLUGFuYW1h -IENpdHkxJDAiBgNVBAoMG1RydXN0Q29yIFN5c3RlbXMgUy4gZGUgUi5MLjEnMCUG -A1UECwweVHJ1c3RDb3IgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MR8wHQYDVQQDDBZU -cnVzdENvciBSb290Q2VydCBDQS0xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB -CgKCAQEAv463leLCJhJrMxnHQFgKq1mqjQCj/IDHUHuO1CAmujIS2CNUSSUQIpid -RtLByZ5OGy4sDjjzGiVoHKZaBeYei0i/mJZ0PmnK6bV4pQa81QBeCQryJ3pS/C3V -seq0iWEk8xoT26nPUu0MJLq5nux+AHT6k61sKZKuUbS701e/s/OojZz0JEsq1pme -9J7+wH5COucLlVPat2gOkEz7cD+PSiyU8ybdY2mplNgQTsVHCJCZGxdNuWxu72CV -EY4hgLW9oHPY0LJ3xEXqWib7ZnZ2+AYfYW0PVcWDtxBWcgYHpfOxGgMFZA6dWorW -hnAbJN7+KIor0Gqw/Hqi3LJ5DotlDwIDAQABo2MwYTAdBgNVHQ4EFgQU7mtJPHo/ -DeOxCbeKyKsZn3MzUOcwHwYDVR0jBBgwFoAU7mtJPHo/DeOxCbeKyKsZn3MzUOcw -DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQAD -ggEBACUY1JGPE+6PHh0RU9otRCkZoB5rMZ5NDp6tPVxBb5UrJKF5mDo4Nvu7Zp5I -/5CQ7z3UuJu0h3U/IJvOcs+hVcFNZKIZBqEHMwwLKeXx6quj7LUKdJDHfXLy11yf -ke+Ri7fc7Waiz45mO7yfOgLgJ90WmMCV1Aqk5IGadZQ1nJBfiDcGrVmVCrDRZ9MZ -yonnMlo2HD6CqFqTvsbQZJG2z9m2GM/bftJlo6bEjhcxwft+dtvTheNYsnd6djts -L1Ac59v2Z3kf9YKVmgenFK+P3CghZwnS1k1aHBkcjndcw5QkPTJrS37UeJSDvjdN -zl/HHk484IkzlQsPpTLWPFp5LBk= ------END CERTIFICATE----- - -# Issuer: CN=TrustCor RootCert CA-2 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority -# Subject: CN=TrustCor RootCert CA-2 O=TrustCor Systems S. de R.L. 
OU=TrustCor Certificate Authority -# Label: "TrustCor RootCert CA-2" -# Serial: 2711694510199101698 -# MD5 Fingerprint: a2:e1:f8:18:0b:ba:45:d5:c7:41:2a:bb:37:52:45:64 -# SHA1 Fingerprint: b8:be:6d:cb:56:f1:55:b9:63:d4:12:ca:4e:06:34:c7:94:b2:1c:c0 -# SHA256 Fingerprint: 07:53:e9:40:37:8c:1b:d5:e3:83:6e:39:5d:ae:a5:cb:83:9e:50:46:f1:bd:0e:ae:19:51:cf:10:fe:c7:c9:65 ------BEGIN CERTIFICATE----- -MIIGLzCCBBegAwIBAgIIJaHfyjPLWQIwDQYJKoZIhvcNAQELBQAwgaQxCzAJBgNV -BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw -IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy -dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEfMB0GA1UEAwwWVHJ1c3RDb3Ig -Um9vdENlcnQgQ0EtMjAeFw0xNjAyMDQxMjMyMjNaFw0zNDEyMzExNzI2MzlaMIGk -MQswCQYDVQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEg -Q2l0eTEkMCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYD -VQQLDB5UcnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRy -dXN0Q29yIFJvb3RDZXJ0IENBLTIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK -AoICAQCnIG7CKqJiJJWQdsg4foDSq8GbZQWU9MEKENUCrO2fk8eHyLAnK0IMPQo+ -QVqedd2NyuCb7GgypGmSaIwLgQ5WoD4a3SwlFIIvl9NkRvRUqdw6VC0xK5mC8tkq -1+9xALgxpL56JAfDQiDyitSSBBtlVkxs1Pu2YVpHI7TYabS3OtB0PAx1oYxOdqHp -2yqlO/rOsP9+aij9JxzIsekp8VduZLTQwRVtDr4uDkbIXvRR/u8OYzo7cbrPb1nK -DOObXUm4TOJXsZiKQlecdu/vvdFoqNL0Cbt3Nb4lggjEFixEIFapRBF37120Hape -az6LMvYHL1cEksr1/p3C6eizjkxLAjHZ5DxIgif3GIJ2SDpxsROhOdUuxTTCHWKF -3wP+TfSvPd9cW436cOGlfifHhi5qjxLGhF5DUVCcGZt45vz27Ud+ez1m7xMTiF88 -oWP7+ayHNZ/zgp6kPwqcMWmLmaSISo5uZk3vFsQPeSghYA2FFn3XVDjxklb9tTNM -g9zXEJ9L/cb4Qr26fHMC4P99zVvh1Kxhe1fVSntb1IVYJ12/+CtgrKAmrhQhJ8Z3 -mjOAPF5GP/fDsaOGM8boXg25NSyqRsGFAnWAoOsk+xWq5Gd/bnc/9ASKL3x74xdh -8N0JqSDIvgmk0H5Ew7IwSjiqqewYmgeCK9u4nBit2uBGF6zPXQIDAQABo2MwYTAd -BgNVHQ4EFgQU2f4hQG6UnrybPZx9mCAZ5YwwYrIwHwYDVR0jBBgwFoAU2f4hQG6U -nrybPZx9mCAZ5YwwYrIwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYw -DQYJKoZIhvcNAQELBQADggIBAJ5Fngw7tu/hOsh80QA9z+LqBrWyOrsGS2h60COX -dKcs8AjYeVrXWoSK2BKaG9l9XE1wxaX5q+WjiYndAfrs3fnpkpfbsEZC89NiqpX+ -MWcUaViQCqoL7jcjx1BRtPV+nuN79+TMQjItSQzL/0kMmx40/W5ulop5A7Zv2wnL -/V9lFDfhOPXzYRZY5LVtDQsEGz9QLX+zx3oaFoBg+Iof6Rsqxvm6ARppv9JYx1RX -CI/hOWB3S6xZhBqI8d3LT3jX5+EzLfzuQfogsL7L9ziUwOHQhQ+77Sxzq+3+knYa -ZH9bDTMJBzN7Bj8RpFxwPIXAz+OQqIN3+tvmxYxoZxBnpVIt8MSZj3+/0WvitUfW -2dCFmU2Umw9Lje4AWkcdEQOsQRivh7dvDDqPys/cA8GiCcjl/YBeyGBCARsaU1q7 -N6a3vLqE6R5sGtRk2tRD/pOLS/IseRYQ1JMLiI+h2IYURpFHmygk71dSTlxCnKr3 -Sewn6EAes6aJInKc9Q0ztFijMDvd1GpUk74aTfOTlPf8hAs/hCBcNANExdqtvArB -As8e5ZTZ845b2EzwnexhF7sUMlQMAimTHpKG9n/v55IFDlndmQguLvqcAFLTxWYp -5KeXRKQOKIETNcX2b2TmQcTVL8w0RSXPQQCWPUouwpaYT05KnJe32x+SMsj/D1Fu -1uwJ ------END CERTIFICATE----- - -# Issuer: CN=TrustCor ECA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority -# Subject: CN=TrustCor ECA-1 O=TrustCor Systems S. de R.L. 
OU=TrustCor Certificate Authority -# Label: "TrustCor ECA-1" -# Serial: 9548242946988625984 -# MD5 Fingerprint: 27:92:23:1d:0a:f5:40:7c:e9:e6:6b:9d:d8:f5:e7:6c -# SHA1 Fingerprint: 58:d1:df:95:95:67:6b:63:c0:f0:5b:1c:17:4d:8b:84:0b:c8:78:bd -# SHA256 Fingerprint: 5a:88:5d:b1:9c:01:d9:12:c5:75:93:88:93:8c:af:bb:df:03:1a:b2:d4:8e:91:ee:15:58:9b:42:97:1d:03:9c ------BEGIN CERTIFICATE----- -MIIEIDCCAwigAwIBAgIJAISCLF8cYtBAMA0GCSqGSIb3DQEBCwUAMIGcMQswCQYD -VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk -MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U -cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxFzAVBgNVBAMMDlRydXN0Q29y -IEVDQS0xMB4XDTE2MDIwNDEyMzIzM1oXDTI5MTIzMTE3MjgwN1owgZwxCzAJBgNV -BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw -IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy -dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEXMBUGA1UEAwwOVHJ1c3RDb3Ig -RUNBLTEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDPj+ARtZ+odnbb -3w9U73NjKYKtR8aja+3+XzP4Q1HpGjORMRegdMTUpwHmspI+ap3tDvl0mEDTPwOA -BoJA6LHip1GnHYMma6ve+heRK9jGrB6xnhkB1Zem6g23xFUfJ3zSCNV2HykVh0A5 -3ThFEXXQmqc04L/NyFIduUd+Dbi7xgz2c1cWWn5DkR9VOsZtRASqnKmcp0yJF4Ou -owReUoCLHhIlERnXDH19MURB6tuvsBzvgdAsxZohmz3tQjtQJvLsznFhBmIhVE5/ -wZ0+fyCMgMsq2JdiyIMzkX2woloPV+g7zPIlstR8L+xNxqE6FXrntl019fZISjZF -ZtS6mFjBAgMBAAGjYzBhMB0GA1UdDgQWBBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAf -BgNVHSMEGDAWgBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAPBgNVHRMBAf8EBTADAQH/ -MA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAQEABT41XBVwm8nHc2Fv -civUwo/yQ10CzsSUuZQRg2dd4mdsdXa/uwyqNsatR5Nj3B5+1t4u/ukZMjgDfxT2 -AHMsWbEhBuH7rBiVDKP/mZb3Kyeb1STMHd3BOuCYRLDE5D53sXOpZCz2HAF8P11F -hcCF5yWPldwX8zyfGm6wyuMdKulMY/okYWLW2n62HGz1Ah3UKt1VkOsqEUc8Ll50 -soIipX1TH0XsJ5F95yIW6MBoNtjG8U+ARDL54dHRHareqKucBK+tIA5kmE2la8BI -WJZpTdwHjFGTot+fDz2LYLSCjaoITmJF4PkL0uDgPFveXHEnJcLmA4GLEFPjx1Wi -tJ/X5g== ------END CERTIFICATE----- - # Issuer: CN=SSL.com Root Certification Authority RSA O=SSL Corporation # Subject: CN=SSL.com Root Certification Authority RSA O=SSL Corporation # Label: "SSL.com Root Certification Authority RSA" @@ -3635,126 +2975,6 @@ rYy0UGYwEAYJKwYBBAGCNxUBBAMCAQAwCgYIKoZIzj0EAwMDaAAwZQIwJsdpW9zV Mgj/mkkCtojeFK9dbJlxjRo/i9fgojaGHAeCOnZT/cKi7e97sIBPWA9LUzm9 -----END CERTIFICATE----- -# Issuer: CN=GTS Root R1 O=Google Trust Services LLC -# Subject: CN=GTS Root R1 O=Google Trust Services LLC -# Label: "GTS Root R1" -# Serial: 146587175971765017618439757810265552097 -# MD5 Fingerprint: 82:1a:ef:d4:d2:4a:f2:9f:e2:3d:97:06:14:70:72:85 -# SHA1 Fingerprint: e1:c9:50:e6:ef:22:f8:4c:56:45:72:8b:92:20:60:d7:d5:a7:a3:e8 -# SHA256 Fingerprint: 2a:57:54:71:e3:13:40:bc:21:58:1c:bd:2c:f1:3e:15:84:63:20:3e:ce:94:bc:f9:d3:cc:19:6b:f0:9a:54:72 ------BEGIN CERTIFICATE----- -MIIFWjCCA0KgAwIBAgIQbkepxUtHDA3sM9CJuRz04TANBgkqhkiG9w0BAQwFADBH -MQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExM -QzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIy -MDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNl -cnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEB -AQUAA4ICDwAwggIKAoICAQC2EQKLHuOhd5s73L+UPreVp0A8of2C+X0yBoJx9vaM -f/vo27xqLpeXo4xL+Sv2sfnOhB2x+cWX3u+58qPpvBKJXqeqUqv4IyfLpLGcY9vX -mX7wCl7raKb0xlpHDU0QM+NOsROjyBhsS+z8CZDfnWQpJSMHobTSPS5g4M/SCYe7 -zUjwTcLCeoiKu7rPWRnWr4+wB7CeMfGCwcDfLqZtbBkOtdh+JhpFAz2weaSUKK0P -fyblqAj+lug8aJRT7oM6iCsVlgmy4HqMLnXWnOunVmSPlk9orj2XwoSPwLxAwAtc -vfaHszVsrBhQf4TgTM2S0yDpM7xSma8ytSmzJSq0SPly4cpk9+aCEI3oncKKiPo4 -Zor8Y/kB+Xj9e1x3+naH+uzfsQ55lVe0vSbv1gHR6xYKu44LtcXFilWr06zqkUsp 
-zBmkMiVOKvFlRNACzqrOSbTqn3yDsEB750Orp2yjj32JgfpMpf/VjsPOS+C12LOO -Rc92wO1AK/1TD7Cn1TsNsYqiA94xrcx36m97PtbfkSIS5r762DL8EGMUUXLeXdYW -k70paDPvOmbsB4om3xPXV2V4J95eSRQAogB/mqghtqmxlbCluQ0WEdrHbEg8QOB+ -DVrNVjzRlwW5y0vtOUucxD/SVRNuJLDWcfr0wbrM7Rv1/oFB2ACYPTrIrnqYNxgF -lQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV -HQ4EFgQU5K8rJnEaK0gnhS9SZizv8IkTcT4wDQYJKoZIhvcNAQEMBQADggIBADiW -Cu49tJYeX++dnAsznyvgyv3SjgofQXSlfKqE1OXyHuY3UjKcC9FhHb8owbZEKTV1 -d5iyfNm9dKyKaOOpMQkpAWBz40d8U6iQSifvS9efk+eCNs6aaAyC58/UEBZvXw6Z -XPYfcX3v73svfuo21pdwCxXu11xWajOl40k4DLh9+42FpLFZXvRq4d2h9mREruZR -gyFmxhE+885H7pwoHyXa/6xmld01D1zvICxi/ZG6qcz8WpyTgYMpl0p8WnK0OdC3 -d8t5/Wk6kjftbjhlRn7pYL15iJdfOBL07q9bgsiG1eGZbYwE8na6SfZu6W0eX6Dv -J4J2QPim01hcDyxC2kLGe4g0x8HYRZvBPsVhHdljUEn2NIVq4BjFbkerQUIpm/Zg -DdIx02OYI5NaAIFItO/Nis3Jz5nu2Z6qNuFoS3FJFDYoOj0dzpqPJeaAcWErtXvM -+SUWgeExX6GjfhaknBZqlxi9dnKlC54dNuYvoS++cJEPqOba+MSSQGwlfnuzCdyy -F62ARPBopY+Udf90WuioAnwMCeKpSwughQtiue+hMZL77/ZRBIls6Kl0obsXs7X9 -SQ98POyDGCBDTtWTurQ0sR8WNh8M5mQ5Fkzc4P4dyKliPUDqysU0ArSuiYgzNdws -E3PYJ/HQcu51OyLemGhmW/HGY0dVHLqlCFF1pkgl ------END CERTIFICATE----- - -# Issuer: CN=GTS Root R2 O=Google Trust Services LLC -# Subject: CN=GTS Root R2 O=Google Trust Services LLC -# Label: "GTS Root R2" -# Serial: 146587176055767053814479386953112547951 -# MD5 Fingerprint: 44:ed:9a:0e:a4:09:3b:00:f2:ae:4c:a3:c6:61:b0:8b -# SHA1 Fingerprint: d2:73:96:2a:2a:5e:39:9f:73:3f:e1:c7:1e:64:3f:03:38:34:fc:4d -# SHA256 Fingerprint: c4:5d:7b:b0:8e:6d:67:e6:2e:42:35:11:0b:56:4e:5f:78:fd:92:ef:05:8c:84:0a:ea:4e:64:55:d7:58:5c:60 ------BEGIN CERTIFICATE----- -MIIFWjCCA0KgAwIBAgIQbkepxlqz5yDFMJo/aFLybzANBgkqhkiG9w0BAQwFADBH -MQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExM -QzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIy -MDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNl -cnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEB -AQUAA4ICDwAwggIKAoICAQDO3v2m++zsFDQ8BwZabFn3GTXd98GdVarTzTukk3Lv -CvptnfbwhYBboUhSnznFt+4orO/LdmgUud+tAWyZH8QiHZ/+cnfgLFuv5AS/T3Kg -GjSY6Dlo7JUle3ah5mm5hRm9iYz+re026nO8/4Piy33B0s5Ks40FnotJk9/BW9Bu -XvAuMC6C/Pq8tBcKSOWIm8Wba96wyrQD8Nr0kLhlZPdcTK3ofmZemde4wj7I0BOd -re7kRXuJVfeKH2JShBKzwkCX44ofR5GmdFrS+LFjKBC4swm4VndAoiaYecb+3yXu -PuWgf9RhD1FLPD+M2uFwdNjCaKH5wQzpoeJ/u1U8dgbuak7MkogwTZq9TwtImoS1 -mKPV+3PBV2HdKFZ1E66HjucMUQkQdYhMvI35ezzUIkgfKtzra7tEscszcTJGr61K -8YzodDqs5xoic4DSMPclQsciOzsSrZYuxsN2B6ogtzVJV+mSSeh2FnIxZyuWfoqj -x5RWIr9qS34BIbIjMt/kmkRtWVtd9QCgHJvGeJeNkP+byKq0rxFROV7Z+2et1VsR -nTKaG73VululycslaVNVJ1zgyjbLiGH7HrfQy+4W+9OmTN6SpdTi3/UGVN4unUu0 -kzCqgc7dGtxRcw1PcOnlthYhGXmy5okLdWTK1au8CcEYof/UVKGFPP0UJAOyh9Ok -twIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV -HQ4EFgQUu//KjiOfT5nK2+JopqUVJxce2Q4wDQYJKoZIhvcNAQEMBQADggIBALZp -8KZ3/p7uC4Gt4cCpx/k1HUCCq+YEtN/L9x0Pg/B+E02NjO7jMyLDOfxA325BS0JT -vhaI8dI4XsRomRyYUpOM52jtG2pzegVATX9lO9ZY8c6DR2Dj/5epnGB3GFW1fgiT -z9D2PGcDFWEJ+YF59exTpJ/JjwGLc8R3dtyDovUMSRqodt6Sm2T4syzFJ9MHwAiA -pJiS4wGWAqoC7o87xdFtCjMwc3i5T1QWvwsHoaRc5svJXISPD+AVdyx+Jn7axEvb -pxZ3B7DNdehyQtaVhJ2Gg/LkkM0JR9SLA3DaWsYDQvTtN6LwG1BUSw7YhN4ZKJmB -R64JGz9I0cNv4rBgF/XuIwKl2gBbbZCr7qLpGzvpx0QnRY5rn/WkhLx3+WuXrD5R -RaIRpsyF7gpo8j5QOHokYh4XIDdtak23CZvJ/KRY9bb7nE4Yu5UC56GtmwfuNmsk -0jmGwZODUNKBRqhfYlcsu2xkiAhu7xNUX90txGdj08+JN7+dIPT7eoOboB6BAFDC -5AwiWVIQ7UNWhwD4FFKnHYuTjKJNRn8nxnGbJN7k2oaLDX5rIMHAnuFl2GqjpuiF -izoHCBy69Y9Vmhh1fuXsgWbRIXOhNUQLgD1bnF5vKheW0YMjiGZt5obicDIvUiLn -yOd/xCxgXS/Dr55FBcOEArf9LAhST4Ldo/DUhgkC ------END CERTIFICATE----- - -# Issuer: CN=GTS Root R3 
O=Google Trust Services LLC -# Subject: CN=GTS Root R3 O=Google Trust Services LLC -# Label: "GTS Root R3" -# Serial: 146587176140553309517047991083707763997 -# MD5 Fingerprint: 1a:79:5b:6b:04:52:9c:5d:c7:74:33:1b:25:9a:f9:25 -# SHA1 Fingerprint: 30:d4:24:6f:07:ff:db:91:89:8a:0b:e9:49:66:11:eb:8c:5e:46:e5 -# SHA256 Fingerprint: 15:d5:b8:77:46:19:ea:7d:54:ce:1c:a6:d0:b0:c4:03:e0:37:a9:17:f1:31:e8:a0:4e:1e:6b:7a:71:ba:bc:e5 ------BEGIN CERTIFICATE----- -MIICDDCCAZGgAwIBAgIQbkepx2ypcyRAiQ8DVd2NHTAKBggqhkjOPQQDAzBHMQsw -CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU -MBIGA1UEAxMLR1RTIFJvb3QgUjMwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw -MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp -Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjMwdjAQBgcqhkjOPQIBBgUrgQQA -IgNiAAQfTzOHMymKoYTey8chWEGJ6ladK0uFxh1MJ7x/JlFyb+Kf1qPKzEUURout -736GjOyxfi//qXGdGIRFBEFVbivqJn+7kAHjSxm65FSWRQmx1WyRRK2EE46ajA2A -DDL24CejQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud -DgQWBBTB8Sa6oC2uhYHP0/EqEr24Cmf9vDAKBggqhkjOPQQDAwNpADBmAjEAgFuk -fCPAlaUs3L6JbyO5o91lAFJekazInXJ0glMLfalAvWhgxeG4VDvBNhcl2MG9AjEA -njWSdIUlUfUk7GRSJFClH9voy8l27OyCbvWFGFPouOOaKaqW04MjyaR7YbPMAuhd ------END CERTIFICATE----- - -# Issuer: CN=GTS Root R4 O=Google Trust Services LLC -# Subject: CN=GTS Root R4 O=Google Trust Services LLC -# Label: "GTS Root R4" -# Serial: 146587176229350439916519468929765261721 -# MD5 Fingerprint: 5d:b6:6a:c4:60:17:24:6a:1a:99:a8:4b:ee:5e:b4:26 -# SHA1 Fingerprint: 2a:1d:60:27:d9:4a:b1:0a:1c:4d:91:5c:cd:33:a0:cb:3e:2d:54:cb -# SHA256 Fingerprint: 71:cc:a5:39:1f:9e:79:4b:04:80:25:30:b3:63:e1:21:da:8a:30:43:bb:26:66:2f:ea:4d:ca:7f:c9:51:a4:bd ------BEGIN CERTIFICATE----- -MIICCjCCAZGgAwIBAgIQbkepyIuUtui7OyrYorLBmTAKBggqhkjOPQQDAzBHMQsw -CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU -MBIGA1UEAxMLR1RTIFJvb3QgUjQwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw -MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp -Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjQwdjAQBgcqhkjOPQIBBgUrgQQA -IgNiAATzdHOnaItgrkO4NcWBMHtLSZ37wWHO5t5GvWvVYRg1rkDdc/eJkTBa6zzu -hXyiQHY7qca4R9gq55KRanPpsXI5nymfopjTX15YhmUPoYRlBtHci8nHc8iMai/l -xKvRHYqjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud -DgQWBBSATNbrdP9JNqPV2Py1PsVq8JQdjDAKBggqhkjOPQQDAwNnADBkAjBqUFJ0 -CMRw3J5QdCHojXohw0+WbhXRIjVhLfoIN+4Zba3bssx9BzT1YBkstTTZbyACMANx -sbqjYAuG7ZoIapVon+Kz4ZNkfF6Tpt95LY2F45TPI11xzPKwTdb+mciUqXWi4w== ------END CERTIFICATE----- - # Issuer: CN=UCA Global G2 Root O=UniTrust # Subject: CN=UCA Global G2 Root O=UniTrust # Label: "UCA Global G2 Root" @@ -4323,3 +3543,1047 @@ LBT/DShycpWbXgnbiUSYqqFJu3FS8r/2/yehNq+4tneI3TqkbZs0kNwUXTC/t+sX dh2ajcQGjTa3FPOdVGm3jjzVpG2Tgbet9r1ke8LJaDmgkpzNNIaRkPpkUZ3+/uul 9XXeifdy -----END CERTIFICATE----- + +# Issuer: CN=AC RAIZ FNMT-RCM SERVIDORES SEGUROS O=FNMT-RCM OU=Ceres +# Subject: CN=AC RAIZ FNMT-RCM SERVIDORES SEGUROS O=FNMT-RCM OU=Ceres +# Label: "AC RAIZ FNMT-RCM SERVIDORES SEGUROS" +# Serial: 131542671362353147877283741781055151509 +# MD5 Fingerprint: 19:36:9c:52:03:2f:d2:d1:bb:23:cc:dd:1e:12:55:bb +# SHA1 Fingerprint: 62:ff:d9:9e:c0:65:0d:03:ce:75:93:d2:ed:3f:2d:32:c9:e3:e5:4a +# SHA256 Fingerprint: 55:41:53:b1:3d:2c:f9:dd:b7:53:bf:be:1a:4e:0a:e0:8d:0a:a4:18:70:58:fe:60:a2:b8:62:b2:e4:b8:7b:cb +-----BEGIN CERTIFICATE----- +MIICbjCCAfOgAwIBAgIQYvYybOXE42hcG2LdnC6dlTAKBggqhkjOPQQDAzB4MQsw +CQYDVQQGEwJFUzERMA8GA1UECgwIRk5NVC1SQ00xDjAMBgNVBAsMBUNlcmVzMRgw +FgYDVQRhDA9WQVRFUy1RMjgyNjAwNEoxLDAqBgNVBAMMI0FDIFJBSVogRk5NVC1S 
+Q00gU0VSVklET1JFUyBTRUdVUk9TMB4XDTE4MTIyMDA5MzczM1oXDTQzMTIyMDA5 +MzczM1oweDELMAkGA1UEBhMCRVMxETAPBgNVBAoMCEZOTVQtUkNNMQ4wDAYDVQQL +DAVDZXJlczEYMBYGA1UEYQwPVkFURVMtUTI4MjYwMDRKMSwwKgYDVQQDDCNBQyBS +QUlaIEZOTVQtUkNNIFNFUlZJRE9SRVMgU0VHVVJPUzB2MBAGByqGSM49AgEGBSuB +BAAiA2IABPa6V1PIyqvfNkpSIeSX0oNnnvBlUdBeh8dHsVnyV0ebAAKTRBdp20LH +sbI6GA60XYyzZl2hNPk2LEnb80b8s0RpRBNm/dfF/a82Tc4DTQdxz69qBdKiQ1oK +Um8BA06Oi6NCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD +VR0OBBYEFAG5L++/EYZg8k/QQW6rcx/n0m5JMAoGCCqGSM49BAMDA2kAMGYCMQCu +SuMrQMN0EfKVrRYj3k4MGuZdpSRea0R7/DjiT8ucRRcRTBQnJlU5dUoDzBOQn5IC +MQD6SmxgiHPz7riYYqnOK8LZiqZwMR2vsJRM60/G49HzYqc8/5MuB1xJAWdpEgJy +v+c= +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign Root R46 O=GlobalSign nv-sa +# Subject: CN=GlobalSign Root R46 O=GlobalSign nv-sa +# Label: "GlobalSign Root R46" +# Serial: 1552617688466950547958867513931858518042577 +# MD5 Fingerprint: c4:14:30:e4:fa:66:43:94:2a:6a:1b:24:5f:19:d0:ef +# SHA1 Fingerprint: 53:a2:b0:4b:ca:6b:d6:45:e6:39:8a:8e:c4:0d:d2:bf:77:c3:a2:90 +# SHA256 Fingerprint: 4f:a3:12:6d:8d:3a:11:d1:c4:85:5a:4f:80:7c:ba:d6:cf:91:9d:3a:5a:88:b0:3b:ea:2c:63:72:d9:3c:40:c9 +-----BEGIN CERTIFICATE----- +MIIFWjCCA0KgAwIBAgISEdK7udcjGJ5AXwqdLdDfJWfRMA0GCSqGSIb3DQEBDAUA +MEYxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRwwGgYD +VQQDExNHbG9iYWxTaWduIFJvb3QgUjQ2MB4XDTE5MDMyMDAwMDAwMFoXDTQ2MDMy +MDAwMDAwMFowRjELMAkGA1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYt +c2ExHDAaBgNVBAMTE0dsb2JhbFNpZ24gUm9vdCBSNDYwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQCsrHQy6LNl5brtQyYdpokNRbopiLKkHWPd08EsCVeJ +OaFV6Wc0dwxu5FUdUiXSE2te4R2pt32JMl8Nnp8semNgQB+msLZ4j5lUlghYruQG +vGIFAha/r6gjA7aUD7xubMLL1aa7DOn2wQL7Id5m3RerdELv8HQvJfTqa1VbkNud +316HCkD7rRlr+/fKYIje2sGP1q7Vf9Q8g+7XFkyDRTNrJ9CG0Bwta/OrffGFqfUo +0q3v84RLHIf8E6M6cqJaESvWJ3En7YEtbWaBkoe0G1h6zD8K+kZPTXhc+CtI4wSE +y132tGqzZfxCnlEmIyDLPRT5ge1lFgBPGmSXZgjPjHvjK8Cd+RTyG/FWaha/LIWF +zXg4mutCagI0GIMXTpRW+LaCtfOW3T3zvn8gdz57GSNrLNRyc0NXfeD412lPFzYE ++cCQYDdF3uYM2HSNrpyibXRdQr4G9dlkbgIQrImwTDsHTUB+JMWKmIJ5jqSngiCN +I/onccnfxkF0oE32kRbcRoxfKWMxWXEM2G/CtjJ9++ZdU6Z+Ffy7dXxd7Pj2Fxzs +x2sZy/N78CsHpdlseVR2bJ0cpm4O6XkMqCNqo98bMDGfsVR7/mrLZqrcZdCinkqa +ByFrgY/bxFn63iLABJzjqls2k+g9vXqhnQt2sQvHnf3PmKgGwvgqo6GDoLclcqUC +4wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQUA1yrc4GHqMywptWU4jaWSf8FmSwwDQYJKoZIhvcNAQEMBQADggIBAHx4 +7PYCLLtbfpIrXTncvtgdokIzTfnvpCo7RGkerNlFo048p9gkUbJUHJNOxO97k4Vg +JuoJSOD1u8fpaNK7ajFxzHmuEajwmf3lH7wvqMxX63bEIaZHU1VNaL8FpO7XJqti +2kM3S+LGteWygxk6x9PbTZ4IevPuzz5i+6zoYMzRx6Fcg0XERczzF2sUyQQCPtIk +pnnpHs6i58FZFZ8d4kuaPp92CC1r2LpXFNqD6v6MVenQTqnMdzGxRBF6XLE+0xRF +FRhiJBPSy03OXIPBNvIQtQ6IbbjhVp+J3pZmOUdkLG5NrmJ7v2B0GbhWrJKsFjLt +rWhV/pi60zTe9Mlhww6G9kuEYO4Ne7UyWHmRVSyBQ7N0H3qqJZ4d16GLuc1CLgSk +ZoNNiTW2bKg2SnkheCLQQrzRQDGQob4Ez8pn7fXwgNNgyYMqIgXQBztSvwyeqiv5 +u+YfjyW6hY0XHgL+XVAEV8/+LbzvXMAaq7afJMbfc2hIkCwU9D9SGuTSyxTDYWnP +4vkYxboznxSjBF25cfe1lNj2M8FawTSLfJvdkzrnE6JwYZ+vj+vYxXX4M2bUdGc6 +N3ec592kD3ZDZopD8p/7DEJ4Y9HiD2971KE9dJeFt0g5QdYg/NA6s/rob8SKunE3 +vouXsXgxT7PntgMTzlSdriVZzH81Xwj3QEUxeCp6 +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign Root E46 O=GlobalSign nv-sa +# Subject: CN=GlobalSign Root E46 O=GlobalSign nv-sa +# Label: "GlobalSign Root E46" +# Serial: 1552617690338932563915843282459653771421763 +# MD5 Fingerprint: b5:b8:66:ed:de:08:83:e3:c9:e2:01:34:06:ac:51:6f +# SHA1 Fingerprint: 39:b4:6c:d5:fe:80:06:eb:e2:2f:4a:bb:08:33:a0:af:db:b9:dd:84 +# SHA256 Fingerprint: 
cb:b9:c4:4d:84:b8:04:3e:10:50:ea:31:a6:9f:51:49:55:d7:bf:d2:e2:c6:b4:93:01:01:9a:d6:1d:9f:50:58 +-----BEGIN CERTIFICATE----- +MIICCzCCAZGgAwIBAgISEdK7ujNu1LzmJGjFDYQdmOhDMAoGCCqGSM49BAMDMEYx +CzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRwwGgYDVQQD +ExNHbG9iYWxTaWduIFJvb3QgRTQ2MB4XDTE5MDMyMDAwMDAwMFoXDTQ2MDMyMDAw +MDAwMFowRjELMAkGA1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2Ex +HDAaBgNVBAMTE0dsb2JhbFNpZ24gUm9vdCBFNDYwdjAQBgcqhkjOPQIBBgUrgQQA +IgNiAAScDrHPt+ieUnd1NPqlRqetMhkytAepJ8qUuwzSChDH2omwlwxwEwkBjtjq +R+q+soArzfwoDdusvKSGN+1wCAB16pMLey5SnCNoIwZD7JIvU4Tb+0cUB+hflGdd +yXqBPCCjQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud +DgQWBBQxCpCPtsad0kRLgLWi5h+xEk8blTAKBggqhkjOPQQDAwNoADBlAjEA31SQ +7Zvvi5QCkxeCmb6zniz2C5GMn0oUsfZkvLtoURMMA/cVi4RguYv/Uo7njLwcAjA8 ++RHUjE7AwWHCFUyqqx0LMV87HOIAl0Qx5v5zli/altP+CAezNIm8BZ/3Hobui3A= +-----END CERTIFICATE----- + +# Issuer: CN=GLOBALTRUST 2020 O=e-commerce monitoring GmbH +# Subject: CN=GLOBALTRUST 2020 O=e-commerce monitoring GmbH +# Label: "GLOBALTRUST 2020" +# Serial: 109160994242082918454945253 +# MD5 Fingerprint: 8a:c7:6f:cb:6d:e3:cc:a2:f1:7c:83:fa:0e:78:d7:e8 +# SHA1 Fingerprint: d0:67:c1:13:51:01:0c:aa:d0:c7:6a:65:37:31:16:26:4f:53:71:a2 +# SHA256 Fingerprint: 9a:29:6a:51:82:d1:d4:51:a2:e3:7f:43:9b:74:da:af:a2:67:52:33:29:f9:0f:9a:0d:20:07:c3:34:e2:3c:9a +-----BEGIN CERTIFICATE----- +MIIFgjCCA2qgAwIBAgILWku9WvtPilv6ZeUwDQYJKoZIhvcNAQELBQAwTTELMAkG +A1UEBhMCQVQxIzAhBgNVBAoTGmUtY29tbWVyY2UgbW9uaXRvcmluZyBHbWJIMRkw +FwYDVQQDExBHTE9CQUxUUlVTVCAyMDIwMB4XDTIwMDIxMDAwMDAwMFoXDTQwMDYx +MDAwMDAwMFowTTELMAkGA1UEBhMCQVQxIzAhBgNVBAoTGmUtY29tbWVyY2UgbW9u +aXRvcmluZyBHbWJIMRkwFwYDVQQDExBHTE9CQUxUUlVTVCAyMDIwMIICIjANBgkq +hkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAri5WrRsc7/aVj6B3GyvTY4+ETUWiD59b +RatZe1E0+eyLinjF3WuvvcTfk0Uev5E4C64OFudBc/jbu9G4UeDLgztzOG53ig9Z +YybNpyrOVPu44sB8R85gfD+yc/LAGbaKkoc1DZAoouQVBGM+uq/ufF7MpotQsjj3 +QWPKzv9pj2gOlTblzLmMCcpL3TGQlsjMH/1WljTbjhzqLL6FLmPdqqmV0/0plRPw +yJiT2S0WR5ARg6I6IqIoV6Lr/sCMKKCmfecqQjuCgGOlYx8ZzHyyZqjC0203b+J+ +BlHZRYQfEs4kUmSFC0iAToexIiIwquuuvuAC4EDosEKAA1GqtH6qRNdDYfOiaxaJ +SaSjpCuKAsR49GiKweR6NrFvG5Ybd0mN1MkGco/PU+PcF4UgStyYJ9ORJitHHmkH +r96i5OTUawuzXnzUJIBHKWk7buis/UDr2O1xcSvy6Fgd60GXIsUf1DnQJ4+H4xj0 +4KlGDfV0OoIu0G4skaMxXDtG6nsEEFZegB31pWXogvziB4xiRfUg3kZwhqG8k9Me +dKZssCz3AwyIDMvUclOGvGBG85hqwvG/Q/lwIHfKN0F5VVJjjVsSn8VoxIidrPIw +q7ejMZdnrY8XD2zHc+0klGvIg5rQmjdJBKuxFshsSUktq6HQjJLyQUp5ISXbY9e2 +nKd+Qmn7OmMCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFNwuH9FhN3nkq9XVsxJxaD1qaJwiMB8GA1UdIwQYMBaAFNwu +H9FhN3nkq9XVsxJxaD1qaJwiMA0GCSqGSIb3DQEBCwUAA4ICAQCR8EICaEDuw2jA +VC/f7GLDw56KoDEoqoOOpFaWEhCGVrqXctJUMHytGdUdaG/7FELYjQ7ztdGl4wJC +XtzoRlgHNQIw4Lx0SsFDKv/bGtCwr2zD/cuz9X9tAy5ZVp0tLTWMstZDFyySCstd +6IwPS3BD0IL/qMy/pJTAvoe9iuOTe8aPmxadJ2W8esVCgmxcB9CpwYhgROmYhRZf ++I/KARDOJcP5YBugxZfD0yyIMaK9MOzQ0MAS8cE54+X1+NZK3TTN+2/BT+MAi1bi +kvcoskJ3ciNnxz8RFbLEAwW+uxF7Cr+obuf/WEPPm2eggAe2HcqtbepBEX4tdJP7 +wry+UUTF72glJ4DjyKDUEuzZpTcdN3y0kcra1LGWge9oXHYQSa9+pTeAsRxSvTOB +TI/53WXZFM2KJVj04sWDpQmQ1GwUY7VA3+vA/MRYfg0UFodUJ25W5HCEuGwyEn6C +MUO+1918oa2u1qsgEu8KwxCMSZY13At1XrFP1U80DhEgB3VDRemjEdqso5nCtnkn +4rnvyOL2NSl6dPrFf4IFYqYK6miyeUcGbvJXqBUzxvd4Sj1Ce2t+/vdG6tHrju+I +aFvowdlxfv1k7/9nR4hYJS8+hge9+6jlgqispdNpQ80xiEmEU5LAsTkbOYMBMMTy +qfrQA71yN2BWHzZ8vTmR9W0Nv3vXkg== +-----END CERTIFICATE----- + +# Issuer: CN=ANF Secure Server Root CA O=ANF Autoridad de Certificacion OU=ANF CA Raiz +# Subject: CN=ANF Secure Server Root CA O=ANF Autoridad de Certificacion OU=ANF CA Raiz +# Label: "ANF Secure Server Root 
CA" +# Serial: 996390341000653745 +# MD5 Fingerprint: 26:a6:44:5a:d9:af:4e:2f:b2:1d:b6:65:b0:4e:e8:96 +# SHA1 Fingerprint: 5b:6e:68:d0:cc:15:b6:a0:5f:1e:c1:5f:ae:02:fc:6b:2f:5d:6f:74 +# SHA256 Fingerprint: fb:8f:ec:75:91:69:b9:10:6b:1e:51:16:44:c6:18:c5:13:04:37:3f:6c:06:43:08:8d:8b:ef:fd:1b:99:75:99 +-----BEGIN CERTIFICATE----- +MIIF7zCCA9egAwIBAgIIDdPjvGz5a7EwDQYJKoZIhvcNAQELBQAwgYQxEjAQBgNV +BAUTCUc2MzI4NzUxMDELMAkGA1UEBhMCRVMxJzAlBgNVBAoTHkFORiBBdXRvcmlk +YWQgZGUgQ2VydGlmaWNhY2lvbjEUMBIGA1UECxMLQU5GIENBIFJhaXoxIjAgBgNV +BAMTGUFORiBTZWN1cmUgU2VydmVyIFJvb3QgQ0EwHhcNMTkwOTA0MTAwMDM4WhcN +MzkwODMwMTAwMDM4WjCBhDESMBAGA1UEBRMJRzYzMjg3NTEwMQswCQYDVQQGEwJF +UzEnMCUGA1UEChMeQU5GIEF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uMRQwEgYD +VQQLEwtBTkYgQ0EgUmFpejEiMCAGA1UEAxMZQU5GIFNlY3VyZSBTZXJ2ZXIgUm9v +dCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANvrayvmZFSVgpCj +cqQZAZ2cC4Ffc0m6p6zzBE57lgvsEeBbphzOG9INgxwruJ4dfkUyYA8H6XdYfp9q +yGFOtibBTI3/TO80sh9l2Ll49a2pcbnvT1gdpd50IJeh7WhM3pIXS7yr/2WanvtH +2Vdy8wmhrnZEE26cLUQ5vPnHO6RYPUG9tMJJo8gN0pcvB2VSAKduyK9o7PQUlrZX +H1bDOZ8rbeTzPvY1ZNoMHKGESy9LS+IsJJ1tk0DrtSOOMspvRdOoiXsezx76W0OL +zc2oD2rKDF65nkeP8Nm2CgtYZRczuSPkdxl9y0oukntPLxB3sY0vaJxizOBQ+OyR +p1RMVwnVdmPF6GUe7m1qzwmd+nxPrWAI/VaZDxUse6mAq4xhj0oHdkLePfTdsiQz +W7i1o0TJrH93PB0j7IKppuLIBkwC/qxcmZkLLxCKpvR/1Yd0DVlJRfbwcVw5Kda/ +SiOL9V8BY9KHcyi1Swr1+KuCLH5zJTIdC2MKF4EA/7Z2Xue0sUDKIbvVgFHlSFJn +LNJhiQcND85Cd8BEc5xEUKDbEAotlRyBr+Qc5RQe8TZBAQIvfXOn3kLMTOmJDVb3 +n5HUA8ZsyY/b2BzgQJhdZpmYgG4t/wHFzstGH6wCxkPmrqKEPMVOHj1tyRRM4y5B +u8o5vzY8KhmqQYdOpc5LMnndkEl/AgMBAAGjYzBhMB8GA1UdIwQYMBaAFJxf0Gxj +o1+TypOYCK2Mh6UsXME3MB0GA1UdDgQWBBScX9BsY6Nfk8qTmAitjIelLFzBNzAO +BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOC +AgEATh65isagmD9uw2nAalxJUqzLK114OMHVVISfk/CHGT0sZonrDUL8zPB1hT+L +9IBdeeUXZ701guLyPI59WzbLWoAAKfLOKyzxj6ptBZNscsdW699QIyjlRRA96Gej +rw5VD5AJYu9LWaL2U/HANeQvwSS9eS9OICI7/RogsKQOLHDtdD+4E5UGUcjohybK +pFtqFiGS3XNgnhAY3jyB6ugYw3yJ8otQPr0R4hUDqDZ9MwFsSBXXiJCZBMXM5gf0 +vPSQ7RPi6ovDj6MzD8EpTBNO2hVWcXNyglD2mjN8orGoGjR0ZVzO0eurU+AagNjq +OknkJjCb5RyKqKkVMoaZkgoQI1YS4PbOTOK7vtuNknMBZi9iPrJyJ0U27U1W45eZ +/zo1PqVUSlJZS2Db7v54EX9K3BR5YLZrZAPbFYPhor72I5dQ8AkzNqdxliXzuUJ9 +2zg/LFis6ELhDtjTO0wugumDLmsx2d1Hhk9tl5EuT+IocTUW0fJz/iUrB0ckYyfI ++PbZa/wSMVYIwFNCr5zQM378BvAxRAMU8Vjq8moNqRGyg77FGr8H6lnco4g175x2 +MjxNBiLOFeXdntiP2t7SxDnlF4HPOEfrf4htWRvfn0IUrn7PqLBmZdo3r5+qPeoo +tt7VMVgWglvquxl1AnMaykgaIZOQCo6ThKd9OyMYkomgjaw= +-----END CERTIFICATE----- + +# Issuer: CN=Certum EC-384 CA O=Asseco Data Systems S.A. OU=Certum Certification Authority +# Subject: CN=Certum EC-384 CA O=Asseco Data Systems S.A. 
OU=Certum Certification Authority +# Label: "Certum EC-384 CA" +# Serial: 160250656287871593594747141429395092468 +# MD5 Fingerprint: b6:65:b3:96:60:97:12:a1:ec:4e:e1:3d:a3:c6:c9:f1 +# SHA1 Fingerprint: f3:3e:78:3c:ac:df:f4:a2:cc:ac:67:55:69:56:d7:e5:16:3c:e1:ed +# SHA256 Fingerprint: 6b:32:80:85:62:53:18:aa:50:d1:73:c9:8d:8b:da:09:d5:7e:27:41:3d:11:4c:f7:87:a0:f5:d0:6c:03:0c:f6 +-----BEGIN CERTIFICATE----- +MIICZTCCAeugAwIBAgIQeI8nXIESUiClBNAt3bpz9DAKBggqhkjOPQQDAzB0MQsw +CQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBTLkEuMScw +JQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxGTAXBgNVBAMT +EENlcnR1bSBFQy0zODQgQ0EwHhcNMTgwMzI2MDcyNDU0WhcNNDMwMzI2MDcyNDU0 +WjB0MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBT +LkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxGTAX +BgNVBAMTEENlcnR1bSBFQy0zODQgQ0EwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAATE +KI6rGFtqvm5kN2PkzeyrOvfMobgOgknXhimfoZTy42B4mIF4Bk3y7JoOV2CDn7Tm +Fy8as10CW4kjPMIRBSqniBMY81CE1700LCeJVf/OTOffph8oxPBUw7l8t1Ot68Kj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI0GZnQkdjrzife81r1HfS+8 +EF9LMA4GA1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNoADBlAjADVS2m5hjEfO/J +UG7BJw+ch69u1RsIGL2SKcHvlJF40jocVYli5RsJHrpka/F2tNQCMQC0QoSZ/6vn +nvuRlydd3LBbMHHOXjgaatkl5+r3YZJW+OraNsKHZZYuciUvf9/DE8k= +-----END CERTIFICATE----- + +# Issuer: CN=Certum Trusted Root CA O=Asseco Data Systems S.A. OU=Certum Certification Authority +# Subject: CN=Certum Trusted Root CA O=Asseco Data Systems S.A. OU=Certum Certification Authority +# Label: "Certum Trusted Root CA" +# Serial: 40870380103424195783807378461123655149 +# MD5 Fingerprint: 51:e1:c2:e7:fe:4c:84:af:59:0e:2f:f4:54:6f:ea:29 +# SHA1 Fingerprint: c8:83:44:c0:18:ae:9f:cc:f1:87:b7:8f:22:d1:c5:d7:45:84:ba:e5 +# SHA256 Fingerprint: fe:76:96:57:38:55:77:3e:37:a9:5e:7a:d4:d9:cc:96:c3:01:57:c1:5d:31:76:5b:a9:b1:57:04:e1:ae:78:fd +-----BEGIN CERTIFICATE----- +MIIFwDCCA6igAwIBAgIQHr9ZULjJgDdMBvfrVU+17TANBgkqhkiG9w0BAQ0FADB6 +MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBTLkEu +MScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxHzAdBgNV +BAMTFkNlcnR1bSBUcnVzdGVkIFJvb3QgQ0EwHhcNMTgwMzE2MTIxMDEzWhcNNDMw +MzE2MTIxMDEzWjB6MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEg +U3lzdGVtcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRo +b3JpdHkxHzAdBgNVBAMTFkNlcnR1bSBUcnVzdGVkIFJvb3QgQ0EwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQDRLY67tzbqbTeRn06TpwXkKQMlzhyC93yZ +n0EGze2jusDbCSzBfN8pfktlL5On1AFrAygYo9idBcEq2EXxkd7fO9CAAozPOA/q +p1x4EaTByIVcJdPTsuclzxFUl6s1wB52HO8AU5853BSlLCIls3Jy/I2z5T4IHhQq +NwuIPMqw9MjCoa68wb4pZ1Xi/K1ZXP69VyywkI3C7Te2fJmItdUDmj0VDT06qKhF +8JVOJVkdzZhpu9PMMsmN74H+rX2Ju7pgE8pllWeg8xn2A1bUatMn4qGtg/BKEiJ3 +HAVz4hlxQsDsdUaakFjgao4rpUYwBI4Zshfjvqm6f1bxJAPXsiEodg42MEx51UGa +mqi4NboMOvJEGyCI98Ul1z3G4z5D3Yf+xOr1Uz5MZf87Sst4WmsXXw3Hw09Omiqi +7VdNIuJGmj8PkTQkfVXjjJU30xrwCSss0smNtA0Aq2cpKNgB9RkEth2+dv5yXMSF +ytKAQd8FqKPVhJBPC/PgP5sZ0jeJP/J7UhyM9uH3PAeXjA6iWYEMspA90+NZRu0P +qafegGtaqge2Gcu8V/OXIXoMsSt0Puvap2ctTMSYnjYJdmZm/Bo/6khUHL4wvYBQ +v3y1zgD2DGHZ5yQD4OMBgQ692IU0iL2yNqh7XAjlRICMb/gv1SHKHRzQ+8S1h9E6 +Tsd2tTVItQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSM+xx1 +vALTn04uSNn5YFSqxLNP+jAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQENBQAD +ggIBAEii1QALLtA/vBzVtVRJHlpr9OTy4EA34MwUe7nJ+jW1dReTagVphZzNTxl4 +WxmB82M+w85bj/UvXgF2Ez8sALnNllI5SW0ETsXpD4YN4fqzX4IS8TrOZgYkNCvo +zMrnadyHncI013nR03e4qllY/p0m+jiGPp2Kh2RX5Rc64vmNueMzeMGQ2Ljdt4NR +5MTMI9UGfOZR0800McD2RrsLrfw9EAUqO0qRJe6M1ISHgCq8CYyqOhNf6DR5UMEQ +GfnTKB7U0VEwKbOukGfWHwpjscWpxkIxYxeU72nLL/qMFH3EQxiJ2fAyQOaA4kZf 
+5ePBAFmo+eggvIksDkc0C+pXwlM2/KfUrzHN/gLldfq5Jwn58/U7yn2fqSLLiMmq +0Uc9NneoWWRrJ8/vJ8HjJLWG965+Mk2weWjROeiQWMODvA8s1pfrzgzhIMfatz7D +P78v3DSk+yshzWePS/Tj6tQ/50+6uaWTRRxmHyH6ZF5v4HaUMst19W7l9o/HuKTM +qJZ9ZPskWkoDbGs4xugDQ5r3V7mzKWmTOPQD8rv7gmsHINFSH5pkAnuYZttcTVoP +0ISVoDwUQwbKytu4QTbaakRnh6+v40URFWkIsr4WOZckbxJF0WddCajJFdr60qZf +E2Efv4WstK2tBZQIgx51F9NxO5NQI1mg7TyRVJ12AMXDuDjb +-----END CERTIFICATE----- + +# Issuer: CN=TunTrust Root CA O=Agence Nationale de Certification Electronique +# Subject: CN=TunTrust Root CA O=Agence Nationale de Certification Electronique +# Label: "TunTrust Root CA" +# Serial: 108534058042236574382096126452369648152337120275 +# MD5 Fingerprint: 85:13:b9:90:5b:36:5c:b6:5e:b8:5a:f8:e0:31:57:b4 +# SHA1 Fingerprint: cf:e9:70:84:0f:e0:73:0f:9d:f6:0c:7f:2c:4b:ee:20:46:34:9c:bb +# SHA256 Fingerprint: 2e:44:10:2a:b5:8c:b8:54:19:45:1c:8e:19:d9:ac:f3:66:2c:af:bc:61:4b:6a:53:96:0a:30:f7:d0:e2:eb:41 +-----BEGIN CERTIFICATE----- +MIIFszCCA5ugAwIBAgIUEwLV4kBMkkaGFmddtLu7sms+/BMwDQYJKoZIhvcNAQEL +BQAwYTELMAkGA1UEBhMCVE4xNzA1BgNVBAoMLkFnZW5jZSBOYXRpb25hbGUgZGUg +Q2VydGlmaWNhdGlvbiBFbGVjdHJvbmlxdWUxGTAXBgNVBAMMEFR1blRydXN0IFJv +b3QgQ0EwHhcNMTkwNDI2MDg1NzU2WhcNNDQwNDI2MDg1NzU2WjBhMQswCQYDVQQG +EwJUTjE3MDUGA1UECgwuQWdlbmNlIE5hdGlvbmFsZSBkZSBDZXJ0aWZpY2F0aW9u +IEVsZWN0cm9uaXF1ZTEZMBcGA1UEAwwQVHVuVHJ1c3QgUm9vdCBDQTCCAiIwDQYJ +KoZIhvcNAQEBBQADggIPADCCAgoCggIBAMPN0/y9BFPdDCA61YguBUtB9YOCfvdZ +n56eY+hz2vYGqU8ftPkLHzmMmiDQfgbU7DTZhrx1W4eI8NLZ1KMKsmwb60ksPqxd +2JQDoOw05TDENX37Jk0bbjBU2PWARZw5rZzJJQRNmpA+TkBuimvNKWfGzC3gdOgF +VwpIUPp6Q9p+7FuaDmJ2/uqdHYVy7BG7NegfJ7/Boce7SBbdVtfMTqDhuazb1YMZ +GoXRlJfXyqNlC/M4+QKu3fZnz8k/9YosRxqZbwUN/dAdgjH8KcwAWJeRTIAAHDOF +li/LQcKLEITDCSSJH7UP2dl3RxiSlGBcx5kDPP73lad9UKGAwqmDrViWVSHbhlnU +r8a83YFuB9tgYv7sEG7aaAH0gxupPqJbI9dkxt/con3YS7qC0lH4Zr8GRuR5KiY2 +eY8fTpkdso8MDhz/yV3A/ZAQprE38806JG60hZC/gLkMjNWb1sjxVj8agIl6qeIb +MlEsPvLfe/ZdeikZjuXIvTZxi11Mwh0/rViizz1wTaZQmCXcI/m4WEEIcb9PuISg +jwBUFfyRbVinljvrS5YnzWuioYasDXxU5mZMZl+QviGaAkYt5IPCgLnPSz7ofzwB +7I9ezX/SKEIBlYrilz0QIX32nRzFNKHsLA4KUiwSVXAkPcvCFDVDXSdOvsC9qnyW +5/yeYa1E0wCXAgMBAAGjYzBhMB0GA1UdDgQWBBQGmpsfU33x9aTI04Y+oXNZtPdE +ITAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFAaamx9TffH1pMjThj6hc1m0 +90QhMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAqgVutt0Vyb+z +xiD2BkewhpMl0425yAA/l/VSJ4hxyXT968pk21vvHl26v9Hr7lxpuhbI87mP0zYu +QEkHDVneixCwSQXi/5E/S7fdAo74gShczNxtr18UnH1YeA32gAm56Q6XKRm4t+v4 +FstVEuTGfbvE7Pi1HE4+Z7/FXxttbUcoqgRYYdZ2vyJ/0Adqp2RT8JeNnYA/u8EH +22Wv5psymsNUk8QcCMNE+3tjEUPRahphanltkE8pjkcFwRJpadbGNjHh/PqAulxP +xOu3Mqz4dWEX1xAZufHSCe96Qp1bWgvUxpVOKs7/B9dPfhgGiPEZtdmYu65xxBzn +dFlY7wyJz4sfdZMaBBSSSFCp61cpABbjNhzI+L/wM9VBD8TMPN3pM0MBkRArHtG5 +Xc0yGYuPjCB31yLEQtyEFpslbei0VXF/sHyz03FJuc9SpAQ/3D2gu68zngowYI7b +nV2UqL1g52KAdoGDDIzMMEZJ4gzSqK/rYXHv5yJiqfdcZGyfFoxnNidF9Ql7v/YQ +CvGwjVRDjAS6oz/v4jXH+XTgbzRB0L9zZVcg+ZtnemZoJE6AZb0QmQZZ8mWvuMZH +u/2QeItBcy6vVR/cO5JyboTT0GFMDcx2V+IthSIVNg3rAZ3r2OvEhJn7wAzMMujj +d9qDRIueVSjAi1jTkD5OGwDxFa2DK5o= +-----END CERTIFICATE----- + +# Issuer: CN=HARICA TLS RSA Root CA 2021 O=Hellenic Academic and Research Institutions CA +# Subject: CN=HARICA TLS RSA Root CA 2021 O=Hellenic Academic and Research Institutions CA +# Label: "HARICA TLS RSA Root CA 2021" +# Serial: 76817823531813593706434026085292783742 +# MD5 Fingerprint: 65:47:9b:58:86:dd:2c:f0:fc:a2:84:1f:1e:96:c4:91 +# SHA1 Fingerprint: 02:2d:05:82:fa:88:ce:14:0c:06:79:de:7f:14:10:e9:45:d7:a5:6d +# SHA256 Fingerprint: d9:5d:0e:8e:da:79:52:5b:f9:be:b1:1b:14:d2:10:0d:32:94:98:5f:0c:62:d9:fa:bd:9c:d9:99:ec:cb:7b:1d +-----BEGIN 
CERTIFICATE----- +MIIFpDCCA4ygAwIBAgIQOcqTHO9D88aOk8f0ZIk4fjANBgkqhkiG9w0BAQsFADBs +MQswCQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBSU0Eg +Um9vdCBDQSAyMDIxMB4XDTIxMDIxOTEwNTUzOFoXDTQ1MDIxMzEwNTUzN1owbDEL +MAkGA1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNl +YXJjaCBJbnN0aXR1dGlvbnMgQ0ExJDAiBgNVBAMMG0hBUklDQSBUTFMgUlNBIFJv +b3QgQ0EgMjAyMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAIvC569l +mwVnlskNJLnQDmT8zuIkGCyEf3dRywQRNrhe7Wlxp57kJQmXZ8FHws+RFjZiPTgE +4VGC/6zStGndLuwRo0Xua2s7TL+MjaQenRG56Tj5eg4MmOIjHdFOY9TnuEFE+2uv +a9of08WRiFukiZLRgeaMOVig1mlDqa2YUlhu2wr7a89o+uOkXjpFc5gH6l8Cct4M +pbOfrqkdtx2z/IpZ525yZa31MJQjB/OCFks1mJxTuy/K5FrZx40d/JiZ+yykgmvw +Kh+OC19xXFyuQnspiYHLA6OZyoieC0AJQTPb5lh6/a6ZcMBaD9YThnEvdmn8kN3b +LW7R8pv1GmuebxWMevBLKKAiOIAkbDakO/IwkfN4E8/BPzWr8R0RI7VDIp4BkrcY +AuUR0YLbFQDMYTfBKnya4dC6s1BG7oKsnTH4+yPiAwBIcKMJJnkVU2DzOFytOOqB +AGMUuTNe3QvboEUHGjMJ+E20pwKmafTCWQWIZYVWrkvL4N48fS0ayOn7H6NhStYq +E613TBoYm5EPWNgGVMWX+Ko/IIqmhaZ39qb8HOLubpQzKoNQhArlT4b4UEV4AIHr +W2jjJo3Me1xR9BQsQL4aYB16cmEdH2MtiKrOokWQCPxrvrNQKlr9qEgYRtaQQJKQ +CoReaDH46+0N0x3GfZkYVVYnZS6NRcUk7M7jAgMBAAGjQjBAMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFApII6ZgpJIKM+qTW8VX6iVNvRLuMA4GA1UdDwEB/wQE +AwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAPpBIqm5iFSVmewzVjIuJndftTgfvnNAU +X15QvWiWkKQUEapobQk1OUAJ2vQJLDSle1mESSmXdMgHHkdt8s4cUCbjnj1AUz/3 +f5Z2EMVGpdAgS1D0NTsY9FVqQRtHBmg8uwkIYtlfVUKqrFOFrJVWNlar5AWMxaja +H6NpvVMPxP/cyuN+8kyIhkdGGvMA9YCRotxDQpSbIPDRzbLrLFPCU3hKTwSUQZqP +JzLB5UkZv/HywouoCjkxKLR9YjYsTewfM7Z+d21+UPCfDtcRj88YxeMn/ibvBZ3P +zzfF0HvaO7AWhAw6k9a+F9sPPg4ZeAnHqQJyIkv3N3a6dcSFA1pj1bF1BcK5vZSt +jBWZp5N99sXzqnTPBIWUmAD04vnKJGW/4GKvyMX6ssmeVkjaef2WdhW+o45WxLM0 +/L5H9MG0qPzVMIho7suuyWPEdr6sOBjhXlzPrjoiUevRi7PzKzMHVIf6tLITe7pT +BGIBnfHAT+7hOtSLIBD6Alfm78ELt5BGnBkpjNxvoEppaZS3JGWg/6w/zgH7IS79 +aPib8qXPMThcFarmlwDB31qlpzmq6YR/PFGoOtmUW4y/Twhx5duoXNTSpv4Ao8YW +xw/ogM4cKGR0GQjTQuPOAF1/sdwTsOEFy9EgqoZ0njnnkf3/W9b3raYvAwtt41dU +63ZTGI0RmLo= +-----END CERTIFICATE----- + +# Issuer: CN=HARICA TLS ECC Root CA 2021 O=Hellenic Academic and Research Institutions CA +# Subject: CN=HARICA TLS ECC Root CA 2021 O=Hellenic Academic and Research Institutions CA +# Label: "HARICA TLS ECC Root CA 2021" +# Serial: 137515985548005187474074462014555733966 +# MD5 Fingerprint: ae:f7:4c:e5:66:35:d1:b7:9b:8c:22:93:74:d3:4b:b0 +# SHA1 Fingerprint: bc:b0:c1:9d:e9:98:92:70:19:38:57:e9:8d:a7:b4:5d:6e:ee:01:48 +# SHA256 Fingerprint: 3f:99:cc:47:4a:cf:ce:4d:fe:d5:87:94:66:5e:47:8d:15:47:73:9f:2e:78:0f:1b:b4:ca:9b:13:30:97:d4:01 +-----BEGIN CERTIFICATE----- +MIICVDCCAdugAwIBAgIQZ3SdjXfYO2rbIvT/WeK/zjAKBggqhkjOPQQDAzBsMQsw +CQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2Vh +cmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBFQ0MgUm9v +dCBDQSAyMDIxMB4XDTIxMDIxOTExMDExMFoXDTQ1MDIxMzExMDEwOVowbDELMAkG +A1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJj +aCBJbnN0aXR1dGlvbnMgQ0ExJDAiBgNVBAMMG0hBUklDQSBUTFMgRUNDIFJvb3Qg +Q0EgMjAyMTB2MBAGByqGSM49AgEGBSuBBAAiA2IABDgI/rGgltJ6rK9JOtDA4MM7 +KKrxcm1lAEeIhPyaJmuqS7psBAqIXhfyVYf8MLA04jRYVxqEU+kw2anylnTDUR9Y +STHMmE5gEYd103KUkE+bECUqqHgtvpBBWJAVcqeht6NCMEAwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUyRtTgRL+BNUW0aq8mm+3oJUZbsowDgYDVR0PAQH/BAQD +AgGGMAoGCCqGSM49BAMDA2cAMGQCMBHervjcToiwqfAircJRQO9gcS3ujwLEXQNw +SaSS6sUUiHCm0w2wqsosQJz76YJumgIwK0eaB8bRwoF8yguWGEEbo/QwCZ61IygN +nxS2PFOiTAZpffpskcYqSUXm7LcT4Tps +-----END CERTIFICATE----- + +# Issuer: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068 +# Subject: 
CN=Autoridad de Certificacion Firmaprofesional CIF A62634068 +# Label: "Autoridad de Certificacion Firmaprofesional CIF A62634068" +# Serial: 1977337328857672817 +# MD5 Fingerprint: 4e:6e:9b:54:4c:ca:b7:fa:48:e4:90:b1:15:4b:1c:a3 +# SHA1 Fingerprint: 0b:be:c2:27:22:49:cb:39:aa:db:35:5c:53:e3:8c:ae:78:ff:b6:fe +# SHA256 Fingerprint: 57:de:05:83:ef:d2:b2:6e:03:61:da:99:da:9d:f4:64:8d:ef:7e:e8:44:1c:3b:72:8a:fa:9b:cd:e0:f9:b2:6a +-----BEGIN CERTIFICATE----- +MIIGFDCCA/ygAwIBAgIIG3Dp0v+ubHEwDQYJKoZIhvcNAQELBQAwUTELMAkGA1UE +BhMCRVMxQjBABgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1h +cHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODAeFw0xNDA5MjMxNTIyMDdaFw0zNjA1 +MDUxNTIyMDdaMFExCzAJBgNVBAYTAkVTMUIwQAYDVQQDDDlBdXRvcmlkYWQgZGUg +Q2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBBNjI2MzQwNjgwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDDUtd9 +thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQM +cas9UX4PB99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefG +L9ItWY16Ck6WaVICqjaY7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15i +NA9wBj4gGFrO93IbJWyTdBSTo3OxDqqHECNZXyAFGUftaI6SEspd/NYrspI8IM/h +X68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyIplD9amML9ZMWGxmPsu2b +m8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctXMbScyJCy +Z/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirja +EbsXLZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/T +KI8xWVvTyQKmtFLKbpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF +6NkBiDkal4ZkQdU7hwxu+g/GvUgUvzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVh +OSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMB0GA1UdDgQWBBRlzeurNR4APn7VdMAc +tHNHDhpkLzASBgNVHRMBAf8ECDAGAQH/AgEBMIGmBgNVHSAEgZ4wgZswgZgGBFUd +IAAwgY8wLwYIKwYBBQUHAgEWI2h0dHA6Ly93d3cuZmlybWFwcm9mZXNpb25hbC5j +b20vY3BzMFwGCCsGAQUFBwICMFAeTgBQAGEAcwBlAG8AIABkAGUAIABsAGEAIABC +AG8AbgBhAG4AbwB2AGEAIAA0ADcAIABCAGEAcgBjAGUAbABvAG4AYQAgADAAOAAw +ADEANzAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQELBQADggIBAHSHKAIrdx9m +iWTtj3QuRhy7qPj4Cx2Dtjqn6EWKB7fgPiDL4QjbEwj4KKE1soCzC1HA01aajTNF +Sa9J8OA9B3pFE1r/yJfY0xgsfZb43aJlQ3CTkBW6kN/oGbDbLIpgD7dvlAceHabJ +hfa9NPhAeGIQcDq+fUs5gakQ1JZBu/hfHAsdCPKxsIl68veg4MSPi3i1O1ilI45P +Vf42O+AMt8oqMEEgtIDNrvx2ZnOorm7hfNoD6JQg5iKj0B+QXSBTFCZX2lSX3xZE +EAEeiGaPcjiT3SC3NL7X8e5jjkd5KAb881lFJWAiMxujX6i6KtoaPc1A6ozuBRWV +1aUsIC+nmCjuRfzxuIgALI9C2lHVnOUTaHFFQ4ueCyE8S1wF3BqfmI7avSKecs2t +CsvMo2ebKHTEm9caPARYpoKdrcd7b/+Alun4jWq9GJAd/0kakFI3ky88Al2CdgtR +5xbHV/g4+afNmyJU72OwFW1TZQNKXkqgsqeOSQBZONXH9IBk9W6VULgRfhVwOEqw +f9DEMnDAGf/JOC0ULGb0QkTmVXYbgBVX/8Cnp6o5qtjTcNAuuuuUavpfNIbnYrX9 +ivAwhZTJryQCL2/W3Wf+47BVTwSYT6RBVuKT0Gro1vP7ZeDOdcQxWQzugsgMYDNK +GbqEZycPvEJdvSRUDewdcAZfpLz6IHxV +-----END CERTIFICATE----- + +# Issuer: CN=vTrus ECC Root CA O=iTrusChina Co.,Ltd. +# Subject: CN=vTrus ECC Root CA O=iTrusChina Co.,Ltd. 
+# Label: "vTrus ECC Root CA" +# Serial: 630369271402956006249506845124680065938238527194 +# MD5 Fingerprint: de:4b:c1:f5:52:8c:9b:43:e1:3e:8f:55:54:17:8d:85 +# SHA1 Fingerprint: f6:9c:db:b0:fc:f6:02:13:b6:52:32:a6:a3:91:3f:16:70:da:c3:e1 +# SHA256 Fingerprint: 30:fb:ba:2c:32:23:8e:2a:98:54:7a:f9:79:31:e5:50:42:8b:9b:3f:1c:8e:eb:66:33:dc:fa:86:c5:b2:7d:d3 +-----BEGIN CERTIFICATE----- +MIICDzCCAZWgAwIBAgIUbmq8WapTvpg5Z6LSa6Q75m0c1towCgYIKoZIzj0EAwMw +RzELMAkGA1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28uLEx0ZC4xGjAY +BgNVBAMTEXZUcnVzIEVDQyBSb290IENBMB4XDTE4MDczMTA3MjY0NFoXDTQzMDcz +MTA3MjY0NFowRzELMAkGA1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28u +LEx0ZC4xGjAYBgNVBAMTEXZUcnVzIEVDQyBSb290IENBMHYwEAYHKoZIzj0CAQYF +K4EEACIDYgAEZVBKrox5lkqqHAjDo6LN/llWQXf9JpRCux3NCNtzslt188+cToL0 +v/hhJoVs1oVbcnDS/dtitN9Ti72xRFhiQgnH+n9bEOf+QP3A2MMrMudwpremIFUd +e4BdS49nTPEQo0IwQDAdBgNVHQ4EFgQUmDnNvtiyjPeyq+GtJK97fKHbH88wDwYD +VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwCgYIKoZIzj0EAwMDaAAwZQIw +V53dVvHH4+m4SVBrm2nDb+zDfSXkV5UTQJtS0zvzQBm8JsctBp61ezaf9SXUY2sA +AjEA6dPGnlaaKsyh2j/IZivTWJwghfqrkYpwcBE4YGQLYgmRWAD5Tfs0aNoJrSEG +GJTO +-----END CERTIFICATE----- + +# Issuer: CN=vTrus Root CA O=iTrusChina Co.,Ltd. +# Subject: CN=vTrus Root CA O=iTrusChina Co.,Ltd. +# Label: "vTrus Root CA" +# Serial: 387574501246983434957692974888460947164905180485 +# MD5 Fingerprint: b8:c9:37:df:fa:6b:31:84:64:c5:ea:11:6a:1b:75:fc +# SHA1 Fingerprint: 84:1a:69:fb:f5:cd:1a:25:34:13:3d:e3:f8:fc:b8:99:d0:c9:14:b7 +# SHA256 Fingerprint: 8a:71:de:65:59:33:6f:42:6c:26:e5:38:80:d0:0d:88:a1:8d:a4:c6:a9:1f:0d:cb:61:94:e2:06:c5:c9:63:87 +-----BEGIN CERTIFICATE----- +MIIFVjCCAz6gAwIBAgIUQ+NxE9izWRRdt86M/TX9b7wFjUUwDQYJKoZIhvcNAQEL +BQAwQzELMAkGA1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28uLEx0ZC4x +FjAUBgNVBAMTDXZUcnVzIFJvb3QgQ0EwHhcNMTgwNzMxMDcyNDA1WhcNNDMwNzMx +MDcyNDA1WjBDMQswCQYDVQQGEwJDTjEcMBoGA1UEChMTaVRydXNDaGluYSBDby4s +THRkLjEWMBQGA1UEAxMNdlRydXMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAL1VfGHTuB0EYgWgrmy3cLRB6ksDXhA/kFocizuwZotsSKYc +IrrVQJLuM7IjWcmOvFjai57QGfIvWcaMY1q6n6MLsLOaXLoRuBLpDLvPbmyAhykU +AyyNJJrIZIO1aqwTLDPxn9wsYTwaP3BVm60AUn/PBLn+NvqcwBauYv6WTEN+VRS+ +GrPSbcKvdmaVayqwlHeFXgQPYh1jdfdr58tbmnDsPmcF8P4HCIDPKNsFxhQnL4Z9 +8Cfe/+Z+M0jnCx5Y0ScrUw5XSmXX+6KAYPxMvDVTAWqXcoKv8R1w6Jz1717CbMdH +flqUhSZNO7rrTOiwCcJlwp2dCZtOtZcFrPUGoPc2BX70kLJrxLT5ZOrpGgrIDajt +J8nU57O5q4IikCc9Kuh8kO+8T/3iCiSn3mUkpF3qwHYw03dQ+A0Em5Q2AXPKBlim +0zvc+gRGE1WKyURHuFE5Gi7oNOJ5y1lKCn+8pu8fA2dqWSslYpPZUxlmPCdiKYZN +pGvu/9ROutW04o5IWgAZCfEF2c6Rsffr6TlP9m8EQ5pV9T4FFL2/s1m02I4zhKOQ +UqqzApVg+QxMaPnu1RcN+HFXtSXkKe5lXa/R7jwXC1pDxaWG6iSe4gUH3DRCEpHW +OXSuTEGC2/KmSNGzm/MzqvOmwMVO9fSddmPmAsYiS8GVP1BkLFTltvA8Kc9XAgMB +AAGjQjBAMB0GA1UdDgQWBBRUYnBj8XWEQ1iO0RYgscasGrz2iTAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAKbqSSaet +8PFww+SX8J+pJdVrnjT+5hpk9jprUrIQeBqfTNqK2uwcN1LgQkv7bHbKJAs5EhWd +nxEt/Hlk3ODg9d3gV8mlsnZwUKT+twpw1aA08XXXTUm6EdGz2OyC/+sOxL9kLX1j +bhd47F18iMjrjld22VkE+rxSH0Ws8HqA7Oxvdq6R2xCOBNyS36D25q5J08FsEhvM +Kar5CKXiNxTKsbhm7xqC5PD48acWabfbqWE8n/Uxy+QARsIvdLGx14HuqCaVvIiv +TDUHKgLKeBRtRytAVunLKmChZwOgzoy8sHJnxDHO2zTlJQNgJXtxmOTAGytfdELS +S8VZCAeHvsXDf+eW2eHcKJfWjwXj9ZtOyh1QRwVTsMo554WgicEFOwE30z9J4nfr +I8iIZjs9OXYhRvHsXyO466JmdXTBQPfYaJqT4i2pLr0cox7IdMakLXogqzu4sEb9 +b91fUlV1YvCXoHzXOP0l382gmxDPi7g4Xl7FtKYCNqEeXxzP4padKar9mK5S4fNB +UvupLnKWnyfjqnN9+BojZns7q2WwMgFLFT49ok8MKzWixtlnEjUwzXYuFrOZnk1P +Ti07NEPhmg4NpGaXutIcSkwsKouLgU9xGqndXHt7CMUADTdA43x7VF8vhV929ven +sBxXVsFy6K2ir40zSbofitzmdHxghm+Hl3s= +-----END CERTIFICATE----- + +# 
Issuer: CN=ISRG Root X2 O=Internet Security Research Group +# Subject: CN=ISRG Root X2 O=Internet Security Research Group +# Label: "ISRG Root X2" +# Serial: 87493402998870891108772069816698636114 +# MD5 Fingerprint: d3:9e:c4:1e:23:3c:a6:df:cf:a3:7e:6d:e0:14:e6:e5 +# SHA1 Fingerprint: bd:b1:b9:3c:d5:97:8d:45:c6:26:14:55:f8:db:95:c7:5a:d1:53:af +# SHA256 Fingerprint: 69:72:9b:8e:15:a8:6e:fc:17:7a:57:af:b7:17:1d:fc:64:ad:d2:8c:2f:ca:8c:f1:50:7e:34:45:3c:cb:14:70 +-----BEGIN CERTIFICATE----- +MIICGzCCAaGgAwIBAgIQQdKd0XLq7qeAwSxs6S+HUjAKBggqhkjOPQQDAzBPMQsw +CQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJuZXQgU2VjdXJpdHkgUmVzZWFyY2gg +R3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBYMjAeFw0yMDA5MDQwMDAwMDBaFw00 +MDA5MTcxNjAwMDBaME8xCzAJBgNVBAYTAlVTMSkwJwYDVQQKEyBJbnRlcm5ldCBT +ZWN1cml0eSBSZXNlYXJjaCBHcm91cDEVMBMGA1UEAxMMSVNSRyBSb290IFgyMHYw +EAYHKoZIzj0CAQYFK4EEACIDYgAEzZvVn4CDCuwJSvMWSj5cz3es3mcFDR0HttwW ++1qLFNvicWDEukWVEYmO6gbf9yoWHKS5xcUy4APgHoIYOIvXRdgKam7mAHf7AlF9 +ItgKbppbd9/w+kHsOdx1ymgHDB/qo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0T +AQH/BAUwAwEB/zAdBgNVHQ4EFgQUfEKWrt5LSDv6kviejM9ti6lyN5UwCgYIKoZI +zj0EAwMDaAAwZQIwe3lORlCEwkSHRhtFcP9Ymd70/aTSVaYgLXTWNLxBo1BfASdW +tL4ndQavEi51mI38AjEAi/V3bNTIZargCyzuFJ0nN6T5U6VR5CmD1/iQMVtCnwr1 +/q4AaOeMSQ+2b1tbFfLn +-----END CERTIFICATE----- + +# Issuer: CN=HiPKI Root CA - G1 O=Chunghwa Telecom Co., Ltd. +# Subject: CN=HiPKI Root CA - G1 O=Chunghwa Telecom Co., Ltd. +# Label: "HiPKI Root CA - G1" +# Serial: 60966262342023497858655262305426234976 +# MD5 Fingerprint: 69:45:df:16:65:4b:e8:68:9a:8f:76:5f:ff:80:9e:d3 +# SHA1 Fingerprint: 6a:92:e4:a8:ee:1b:ec:96:45:37:e3:29:57:49:cd:96:e3:e5:d2:60 +# SHA256 Fingerprint: f0:15:ce:3c:c2:39:bf:ef:06:4b:e9:f1:d2:c4:17:e1:a0:26:4a:0a:94:be:1f:0c:8d:12:18:64:eb:69:49:cc +-----BEGIN CERTIFICATE----- +MIIFajCCA1KgAwIBAgIQLd2szmKXlKFD6LDNdmpeYDANBgkqhkiG9w0BAQsFADBP +MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0 +ZC4xGzAZBgNVBAMMEkhpUEtJIFJvb3QgQ0EgLSBHMTAeFw0xOTAyMjIwOTQ2MDRa +Fw0zNzEyMzExNTU5NTlaME8xCzAJBgNVBAYTAlRXMSMwIQYDVQQKDBpDaHVuZ2h3 +YSBUZWxlY29tIENvLiwgTHRkLjEbMBkGA1UEAwwSSGlQS0kgUm9vdCBDQSAtIEcx +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA9B5/UnMyDHPkvRN0o9Qw +qNCuS9i233VHZvR85zkEHmpwINJaR3JnVfSl6J3VHiGh8Ge6zCFovkRTv4354twv +Vcg3Px+kwJyz5HdcoEb+d/oaoDjq7Zpy3iu9lFc6uux55199QmQ5eiY29yTw1S+6 +lZgRZq2XNdZ1AYDgr/SEYYwNHl98h5ZeQa/rh+r4XfEuiAU+TCK72h8q3VJGZDnz +Qs7ZngyzsHeXZJzA9KMuH5UHsBffMNsAGJZMoYFL3QRtU6M9/Aes1MU3guvklQgZ +KILSQjqj2FPseYlgSGDIcpJQ3AOPgz+yQlda22rpEZfdhSi8MEyr48KxRURHH+CK +FgeW0iEPU8DtqX7UTuybCeyvQqww1r/REEXgphaypcXTT3OUM3ECoWqj1jOXTyFj +HluP2cFeRXF3D4FdXyGarYPM+l7WjSNfGz1BryB1ZlpK9p/7qxj3ccC2HTHsOyDr +y+K49a6SsvfhhEvyovKTmiKe0xRvNlS9H15ZFblzqMF8b3ti6RZsR1pl8w4Rm0bZ +/W3c1pzAtH2lsN0/Vm+h+fbkEkj9Bn8SV7apI09bA8PgcSojt/ewsTu8mL3WmKgM +a/aOEmem8rJY5AIJEzypuxC00jBF8ez3ABHfZfjcK0NVvxaXxA/VLGGEqnKG/uY6 +fsI/fe78LxQ+5oXdUG+3Se0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQU8ncX+l6o/vY9cdVouslGDDjYr7AwDgYDVR0PAQH/BAQDAgGGMA0GCSqG +SIb3DQEBCwUAA4ICAQBQUfB13HAE4/+qddRxosuej6ip0691x1TPOhwEmSKsxBHi +7zNKpiMdDg1H2DfHb680f0+BazVP6XKlMeJ45/dOlBhbQH3PayFUhuaVevvGyuqc +SE5XCV0vrPSltJczWNWseanMX/mF+lLFjfiRFOs6DRfQUsJ748JzjkZ4Bjgs6Fza +ZsT0pPBWGTMpWmWSBUdGSquEwx4noR8RkpkndZMPvDY7l1ePJlsMu5wP1G4wB9Tc +XzZoZjmDlicmisjEOf6aIW/Vcobpf2Lll07QJNBAsNB1CI69aO4I1258EHBGG3zg +iLKecoaZAeO/n0kZtCW+VmWuF2PlHt/o/0elv+EmBYTksMCv5wiZqAxeJoBF1Pho +L5aPruJKHJwWDBNvOIf2u8g0X5IDUXlwpt/L9ZlNec1OvFefQ05rLisY+GpzjLrF +Ne85akEez3GoorKGB1s6yeHvP2UEgEcyRHCVTjFnanRbEEV16rCf0OY1/k6fi8wr 
+kkVbbiVghUbN0aqwdmaTd5a+g744tiROJgvM7XpWGuDpWsZkrUx6AEhEL7lAuxM+ +vhV4nYWBSipX3tUZQ9rbyltHhoMLP7YNdnhzeSJesYAfz77RP1YQmCuVh6EfnWQU +YDksswBVLuT1sw5XxJFBAJw/6KXf6vb/yPCtbVKoF6ubYfwSUTXkJf2vqmqGOQ== +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4 +# Label: "GlobalSign ECC Root CA - R4" +# Serial: 159662223612894884239637590694 +# MD5 Fingerprint: 26:29:f8:6d:e1:88:bf:a2:65:7f:aa:c4:cd:0f:7f:fc +# SHA1 Fingerprint: 6b:a0:b0:98:e1:71:ef:5a:ad:fe:48:15:80:77:10:f4:bd:6f:0b:28 +# SHA256 Fingerprint: b0:85:d7:0b:96:4f:19:1a:73:e4:af:0d:54:ae:7a:0e:07:aa:fd:af:9b:71:dd:08:62:13:8a:b7:32:5a:24:a2 +-----BEGIN CERTIFICATE----- +MIIB3DCCAYOgAwIBAgINAgPlfvU/k/2lCSGypjAKBggqhkjOPQQDAjBQMSQwIgYD +VQQLExtHbG9iYWxTaWduIEVDQyBSb290IENBIC0gUjQxEzARBgNVBAoTCkdsb2Jh +bFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMTIxMTEzMDAwMDAwWhcNMzgw +MTE5MDMxNDA3WjBQMSQwIgYDVQQLExtHbG9iYWxTaWduIEVDQyBSb290IENBIC0g +UjQxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wWTAT +BgcqhkjOPQIBBggqhkjOPQMBBwNCAAS4xnnTj2wlDp8uORkcA6SumuU5BwkWymOx +uYb4ilfBV85C+nOh92VC/x7BALJucw7/xyHlGKSq2XE/qNS5zowdo0IwQDAOBgNV +HQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUVLB7rUW44kB/ ++wpu+74zyTyjhNUwCgYIKoZIzj0EAwIDRwAwRAIgIk90crlgr/HmnKAWBVBfw147 +bmF0774BxL4YSFlhgjICICadVGNA3jdgUM/I2O2dgq43mLyjj0xMqTQrbO/7lZsm +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R1 O=Google Trust Services LLC +# Subject: CN=GTS Root R1 O=Google Trust Services LLC +# Label: "GTS Root R1" +# Serial: 159662320309726417404178440727 +# MD5 Fingerprint: 05:fe:d0:bf:71:a8:a3:76:63:da:01:e0:d8:52:dc:40 +# SHA1 Fingerprint: e5:8c:1c:c4:91:3b:38:63:4b:e9:10:6e:e3:ad:8e:6b:9d:d9:81:4a +# SHA256 Fingerprint: d9:47:43:2a:bd:e7:b7:fa:90:fc:2e:6b:59:10:1b:12:80:e0:e1:c7:e4:e4:0f:a3:c6:88:7f:ff:57:a7:f4:cf +-----BEGIN CERTIFICATE----- +MIIFVzCCAz+gAwIBAgINAgPlk28xsBNJiGuiFzANBgkqhkiG9w0BAQwFADBHMQsw +CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU +MBIGA1UEAxMLR1RTIFJvb3QgUjEwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw +MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp +Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQC2EQKLHuOhd5s73L+UPreVp0A8of2C+X0yBoJx9vaMf/vo +27xqLpeXo4xL+Sv2sfnOhB2x+cWX3u+58qPpvBKJXqeqUqv4IyfLpLGcY9vXmX7w +Cl7raKb0xlpHDU0QM+NOsROjyBhsS+z8CZDfnWQpJSMHobTSPS5g4M/SCYe7zUjw +TcLCeoiKu7rPWRnWr4+wB7CeMfGCwcDfLqZtbBkOtdh+JhpFAz2weaSUKK0Pfybl +qAj+lug8aJRT7oM6iCsVlgmy4HqMLnXWnOunVmSPlk9orj2XwoSPwLxAwAtcvfaH +szVsrBhQf4TgTM2S0yDpM7xSma8ytSmzJSq0SPly4cpk9+aCEI3oncKKiPo4Zor8 +Y/kB+Xj9e1x3+naH+uzfsQ55lVe0vSbv1gHR6xYKu44LtcXFilWr06zqkUspzBmk +MiVOKvFlRNACzqrOSbTqn3yDsEB750Orp2yjj32JgfpMpf/VjsPOS+C12LOORc92 +wO1AK/1TD7Cn1TsNsYqiA94xrcx36m97PtbfkSIS5r762DL8EGMUUXLeXdYWk70p +aDPvOmbsB4om3xPXV2V4J95eSRQAogB/mqghtqmxlbCluQ0WEdrHbEg8QOB+DVrN +VjzRlwW5y0vtOUucxD/SVRNuJLDWcfr0wbrM7Rv1/oFB2ACYPTrIrnqYNxgFlQID +AQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQU5K8rJnEaK0gnhS9SZizv8IkTcT4wDQYJKoZIhvcNAQEMBQADggIBAJ+qQibb +C5u+/x6Wki4+omVKapi6Ist9wTrYggoGxval3sBOh2Z5ofmmWJyq+bXmYOfg6LEe +QkEzCzc9zolwFcq1JKjPa7XSQCGYzyI0zzvFIoTgxQ6KfF2I5DUkzps+GlQebtuy +h6f88/qBVRRiClmpIgUxPoLW7ttXNLwzldMXG+gnoot7TiYaelpkttGsN/H9oPM4 +7HLwEXWdyzRSjeZ2axfG34arJ45JK3VmgRAhpuo+9K4l/3wV3s6MJT/KYnAK9y8J +ZgfIPxz88NtFMN9iiMG1D53Dn0reWVlHxYciNuaCp+0KueIHoI17eko8cdLiA6Ef +MgfdG+RCzgwARWGAtQsgWSl4vflVy2PFPEz0tv/bal8xa5meLMFrUKTX5hgUvYU/ 
+Z6tGn6D/Qqc6f1zLXbBwHSs09dR2CQzreExZBfMzQsNhFRAbd03OIozUhfJFfbdT +6u9AWpQKXCBfTkBdYiJ23//OYb2MI3jSNwLgjt7RETeJ9r/tSQdirpLsQBqvFAnZ +0E6yove+7u7Y/9waLd64NnHi/Hm3lCXRSHNboTXns5lndcEZOitHTtNCjv0xyBZm +2tIMPNuzjsmhDYAPexZ3FL//2wmUspO8IFgV6dtxQ/PeEMMA3KgqlbbC1j+Qa3bb +bP6MvPJwNQzcmRk13NfIRmPVNnGuV/u3gm3c +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R2 O=Google Trust Services LLC +# Subject: CN=GTS Root R2 O=Google Trust Services LLC +# Label: "GTS Root R2" +# Serial: 159662449406622349769042896298 +# MD5 Fingerprint: 1e:39:c0:53:e6:1e:29:82:0b:ca:52:55:36:5d:57:dc +# SHA1 Fingerprint: 9a:44:49:76:32:db:de:fa:d0:bc:fb:5a:7b:17:bd:9e:56:09:24:94 +# SHA256 Fingerprint: 8d:25:cd:97:22:9d:bf:70:35:6b:da:4e:b3:cc:73:40:31:e2:4c:f0:0f:af:cf:d3:2d:c7:6e:b5:84:1c:7e:a8 +-----BEGIN CERTIFICATE----- +MIIFVzCCAz+gAwIBAgINAgPlrsWNBCUaqxElqjANBgkqhkiG9w0BAQwFADBHMQsw +CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU +MBIGA1UEAxMLR1RTIFJvb3QgUjIwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw +MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp +Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQDO3v2m++zsFDQ8BwZabFn3GTXd98GdVarTzTukk3LvCvpt +nfbwhYBboUhSnznFt+4orO/LdmgUud+tAWyZH8QiHZ/+cnfgLFuv5AS/T3KgGjSY +6Dlo7JUle3ah5mm5hRm9iYz+re026nO8/4Piy33B0s5Ks40FnotJk9/BW9BuXvAu +MC6C/Pq8tBcKSOWIm8Wba96wyrQD8Nr0kLhlZPdcTK3ofmZemde4wj7I0BOdre7k +RXuJVfeKH2JShBKzwkCX44ofR5GmdFrS+LFjKBC4swm4VndAoiaYecb+3yXuPuWg +f9RhD1FLPD+M2uFwdNjCaKH5wQzpoeJ/u1U8dgbuak7MkogwTZq9TwtImoS1mKPV ++3PBV2HdKFZ1E66HjucMUQkQdYhMvI35ezzUIkgfKtzra7tEscszcTJGr61K8Yzo +dDqs5xoic4DSMPclQsciOzsSrZYuxsN2B6ogtzVJV+mSSeh2FnIxZyuWfoqjx5RW +Ir9qS34BIbIjMt/kmkRtWVtd9QCgHJvGeJeNkP+byKq0rxFROV7Z+2et1VsRnTKa +G73VululycslaVNVJ1zgyjbLiGH7HrfQy+4W+9OmTN6SpdTi3/UGVN4unUu0kzCq +gc7dGtxRcw1PcOnlthYhGXmy5okLdWTK1au8CcEYof/UVKGFPP0UJAOyh9OktwID +AQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQUu//KjiOfT5nK2+JopqUVJxce2Q4wDQYJKoZIhvcNAQEMBQADggIBAB/Kzt3H +vqGf2SdMC9wXmBFqiN495nFWcrKeGk6c1SuYJF2ba3uwM4IJvd8lRuqYnrYb/oM8 +0mJhwQTtzuDFycgTE1XnqGOtjHsB/ncw4c5omwX4Eu55MaBBRTUoCnGkJE+M3DyC +B19m3H0Q/gxhswWV7uGugQ+o+MePTagjAiZrHYNSVc61LwDKgEDg4XSsYPWHgJ2u +NmSRXbBoGOqKYcl3qJfEycel/FVL8/B/uWU9J2jQzGv6U53hkRrJXRqWbTKH7QMg +yALOWr7Z6v2yTcQvG99fevX4i8buMTolUVVnjWQye+mew4K6Ki3pHrTgSAai/Gev +HyICc/sgCq+dVEuhzf9gR7A/Xe8bVr2XIZYtCtFenTgCR2y59PYjJbigapordwj6 +xLEokCZYCDzifqrXPW+6MYgKBesntaFJ7qBFVHvmJ2WZICGoo7z7GJa7Um8M7YNR +TOlZ4iBgxcJlkoKM8xAfDoqXvneCbT+PHV28SSe9zE8P4c52hgQjxcCMElv924Sg +JPFI/2R80L5cFtHvma3AH/vLrrw4IgYmZNralw4/KBVEqE8AyvCazM90arQ+POuV +7LXTWtiBmelDGDfrs7vRWGJB82bSj6p4lVQgw1oudCvV0b4YacCs1aTPObpRhANl +6WLAYv7YTVWW4tAR+kg0Eeye7QUd5MjWHYbL +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R3 O=Google Trust Services LLC +# Subject: CN=GTS Root R3 O=Google Trust Services LLC +# Label: "GTS Root R3" +# Serial: 159662495401136852707857743206 +# MD5 Fingerprint: 3e:e7:9d:58:02:94:46:51:94:e5:e0:22:4a:8b:e7:73 +# SHA1 Fingerprint: ed:e5:71:80:2b:c8:92:b9:5b:83:3c:d2:32:68:3f:09:cd:a0:1e:46 +# SHA256 Fingerprint: 34:d8:a7:3e:e2:08:d9:bc:db:0d:95:65:20:93:4b:4e:40:e6:94:82:59:6e:8b:6f:73:c8:42:6b:01:0a:6f:48 +-----BEGIN CERTIFICATE----- +MIICCTCCAY6gAwIBAgINAgPluILrIPglJ209ZjAKBggqhkjOPQQDAzBHMQswCQYD +VQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIG +A1UEAxMLR1RTIFJvb3QgUjMwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAw +WjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2Vz +IExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjMwdjAQBgcqhkjOPQIBBgUrgQQAIgNi 
+AAQfTzOHMymKoYTey8chWEGJ6ladK0uFxh1MJ7x/JlFyb+Kf1qPKzEUURout736G +jOyxfi//qXGdGIRFBEFVbivqJn+7kAHjSxm65FSWRQmx1WyRRK2EE46ajA2ADDL2 +4CejQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBTB8Sa6oC2uhYHP0/EqEr24Cmf9vDAKBggqhkjOPQQDAwNpADBmAjEA9uEglRR7 +VKOQFhG/hMjqb2sXnh5GmCCbn9MN2azTL818+FsuVbu/3ZL3pAzcMeGiAjEA/Jdm +ZuVDFhOD3cffL74UOO0BzrEXGhF16b0DjyZ+hOXJYKaV11RZt+cRLInUue4X +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R4 O=Google Trust Services LLC +# Subject: CN=GTS Root R4 O=Google Trust Services LLC +# Label: "GTS Root R4" +# Serial: 159662532700760215368942768210 +# MD5 Fingerprint: 43:96:83:77:19:4d:76:b3:9d:65:52:e4:1d:22:a5:e8 +# SHA1 Fingerprint: 77:d3:03:67:b5:e0:0c:15:f6:0c:38:61:df:7c:e1:3b:92:46:4d:47 +# SHA256 Fingerprint: 34:9d:fa:40:58:c5:e2:63:12:3b:39:8a:e7:95:57:3c:4e:13:13:c8:3f:e6:8f:93:55:6c:d5:e8:03:1b:3c:7d +-----BEGIN CERTIFICATE----- +MIICCTCCAY6gAwIBAgINAgPlwGjvYxqccpBQUjAKBggqhkjOPQQDAzBHMQswCQYD +VQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIG +A1UEAxMLR1RTIFJvb3QgUjQwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAw +WjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2Vz +IExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjQwdjAQBgcqhkjOPQIBBgUrgQQAIgNi +AATzdHOnaItgrkO4NcWBMHtLSZ37wWHO5t5GvWvVYRg1rkDdc/eJkTBa6zzuhXyi +QHY7qca4R9gq55KRanPpsXI5nymfopjTX15YhmUPoYRlBtHci8nHc8iMai/lxKvR +HYqjQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBSATNbrdP9JNqPV2Py1PsVq8JQdjDAKBggqhkjOPQQDAwNpADBmAjEA6ED/g94D +9J+uHXqnLrmvT/aDHQ4thQEd0dlq7A/Cr8deVl5c1RxYIigL9zC2L7F8AjEA8GE8 +p/SgguMh1YQdc4acLa/KNJvxn7kjNuK8YAOdgLOaVsjh4rsUecrNIdSUtUlD +-----END CERTIFICATE----- + +# Issuer: CN=Telia Root CA v2 O=Telia Finland Oyj +# Subject: CN=Telia Root CA v2 O=Telia Finland Oyj +# Label: "Telia Root CA v2" +# Serial: 7288924052977061235122729490515358 +# MD5 Fingerprint: 0e:8f:ac:aa:82:df:85:b1:f4:dc:10:1c:fc:99:d9:48 +# SHA1 Fingerprint: b9:99:cd:d1:73:50:8a:c4:47:05:08:9c:8c:88:fb:be:a0:2b:40:cd +# SHA256 Fingerprint: 24:2b:69:74:2f:cb:1e:5b:2a:bf:98:89:8b:94:57:21:87:54:4e:5b:4d:99:11:78:65:73:62:1f:6a:74:b8:2c +-----BEGIN CERTIFICATE----- +MIIFdDCCA1ygAwIBAgIPAWdfJ9b+euPkrL4JWwWeMA0GCSqGSIb3DQEBCwUAMEQx +CzAJBgNVBAYTAkZJMRowGAYDVQQKDBFUZWxpYSBGaW5sYW5kIE95ajEZMBcGA1UE +AwwQVGVsaWEgUm9vdCBDQSB2MjAeFw0xODExMjkxMTU1NTRaFw00MzExMjkxMTU1 +NTRaMEQxCzAJBgNVBAYTAkZJMRowGAYDVQQKDBFUZWxpYSBGaW5sYW5kIE95ajEZ +MBcGA1UEAwwQVGVsaWEgUm9vdCBDQSB2MjCCAiIwDQYJKoZIhvcNAQEBBQADggIP +ADCCAgoCggIBALLQPwe84nvQa5n44ndp586dpAO8gm2h/oFlH0wnrI4AuhZ76zBq +AMCzdGh+sq/H1WKzej9Qyow2RCRj0jbpDIX2Q3bVTKFgcmfiKDOlyzG4OiIjNLh9 +vVYiQJ3q9HsDrWj8soFPmNB06o3lfc1jw6P23pLCWBnglrvFxKk9pXSW/q/5iaq9 +lRdU2HhE8Qx3FZLgmEKnpNaqIJLNwaCzlrI6hEKNfdWV5Nbb6WLEWLN5xYzTNTOD +n3WhUidhOPFZPY5Q4L15POdslv5e2QJltI5c0BE0312/UqeBAMN/mUWZFdUXyApT +7GPzmX3MaRKGwhfwAZ6/hLzRUssbkmbOpFPlob/E2wnW5olWK8jjfN7j/4nlNW4o +6GwLI1GpJQXrSPjdscr6bAhR77cYbETKJuFzxokGgeWKrLDiKca5JLNrRBH0pUPC +TEPlcDaMtjNXepUugqD0XBCzYYP2AgWGLnwtbNwDRm41k9V6lS/eINhbfpSQBGq6 +WT0EBXWdN6IOLj3rwaRSg/7Qa9RmjtzG6RJOHSpXqhC8fF6CfaamyfItufUXJ63R +DolUK5X6wK0dmBR4M0KGCqlztft0DbcbMBnEWg4cJ7faGND/isgFuvGqHKI3t+ZI +pEYslOqodmJHixBTB0hXbOKSTbauBcvcwUpej6w9GU7C7WB1K9vBykLVAgMBAAGj +YzBhMB8GA1UdIwQYMBaAFHKs5DN5qkWH9v2sHZ7Wxy+G2CQ5MB0GA1UdDgQWBBRy +rOQzeapFh/b9rB2e1scvhtgkOTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUw +AwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAoDtZpwmUPjaE0n4vOaWWl/oRrfxn83EJ +8rKJhGdEr7nv7ZbsnGTbMjBvZ5qsfl+yqwE2foH65IRe0qw24GtixX1LDoJt0nZi +0f6X+J8wfBj5tFJ3gh1229MdqfDBmgC9bXXYfef6xzijnHDoRnkDry5023X4blMM 
+A8iZGok1GTzTyVR8qPAs5m4HeW9q4ebqkYJpCh3DflminmtGFZhb069GHWLIzoBS +SRE/yQQSwxN8PzuKlts8oB4KtItUsiRnDe+Cy748fdHif64W1lZYudogsYMVoe+K +TTJvQS8TUoKU1xrBeKJR3Stwbbca+few4GeXVtt8YVMJAygCQMez2P2ccGrGKMOF +6eLtGpOg3kuYooQ+BXcBlj37tCAPnHICehIv1aO6UXivKitEZU61/Qrowc15h2Er +3oBXRb9n8ZuRXqWk7FlIEA04x7D6w0RtBPV4UBySllva9bguulvP5fBqnUsvWHMt +Ty3EHD70sz+rFQ47GUGKpMFXEmZxTPpT41frYpUJnlTd0cI8Vzy9OK2YZLe4A5pT +VmBds9hCG1xLEooc6+t9xnppxyd/pPiL8uSUZodL6ZQHCRJ5irLrdATczvREWeAW +ysUsWNc8e89ihmpQfTU2Zqf7N+cox9jQraVplI/owd8k+BsHMYeB2F326CjYSlKA +rBPuUBQemMc= +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST BR Root CA 1 2020 O=D-Trust GmbH +# Subject: CN=D-TRUST BR Root CA 1 2020 O=D-Trust GmbH +# Label: "D-TRUST BR Root CA 1 2020" +# Serial: 165870826978392376648679885835942448534 +# MD5 Fingerprint: b5:aa:4b:d5:ed:f7:e3:55:2e:8f:72:0a:f3:75:b8:ed +# SHA1 Fingerprint: 1f:5b:98:f0:e3:b5:f7:74:3c:ed:e6:b0:36:7d:32:cd:f4:09:41:67 +# SHA256 Fingerprint: e5:9a:aa:81:60:09:c2:2b:ff:5b:25:ba:d3:7d:f3:06:f0:49:79:7c:1f:81:d8:5a:b0:89:e6:57:bd:8f:00:44 +-----BEGIN CERTIFICATE----- +MIIC2zCCAmCgAwIBAgIQfMmPK4TX3+oPyWWa00tNljAKBggqhkjOPQQDAzBIMQsw +CQYDVQQGEwJERTEVMBMGA1UEChMMRC1UcnVzdCBHbWJIMSIwIAYDVQQDExlELVRS +VVNUIEJSIFJvb3QgQ0EgMSAyMDIwMB4XDTIwMDIxMTA5NDUwMFoXDTM1MDIxMTA5 +NDQ1OVowSDELMAkGA1UEBhMCREUxFTATBgNVBAoTDEQtVHJ1c3QgR21iSDEiMCAG +A1UEAxMZRC1UUlVTVCBCUiBSb290IENBIDEgMjAyMDB2MBAGByqGSM49AgEGBSuB +BAAiA2IABMbLxyjR+4T1mu9CFCDhQ2tuda38KwOE1HaTJddZO0Flax7mNCq7dPYS +zuht56vkPE4/RAiLzRZxy7+SmfSk1zxQVFKQhYN4lGdnoxwJGT11NIXe7WB9xwy0 +QVK5buXuQqOCAQ0wggEJMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFHOREKv/ +VbNafAkl1bK6CKBrqx9tMA4GA1UdDwEB/wQEAwIBBjCBxgYDVR0fBIG+MIG7MD6g +PKA6hjhodHRwOi8vY3JsLmQtdHJ1c3QubmV0L2NybC9kLXRydXN0X2JyX3Jvb3Rf +Y2FfMV8yMDIwLmNybDB5oHegdYZzbGRhcDovL2RpcmVjdG9yeS5kLXRydXN0Lm5l +dC9DTj1ELVRSVVNUJTIwQlIlMjBSb290JTIwQ0ElMjAxJTIwMjAyMCxPPUQtVHJ1 +c3QlMjBHbWJILEM9REU/Y2VydGlmaWNhdGVyZXZvY2F0aW9ubGlzdDAKBggqhkjO +PQQDAwNpADBmAjEAlJAtE/rhY/hhY+ithXhUkZy4kzg+GkHaQBZTQgjKL47xPoFW +wKrY7RjEsK70PvomAjEA8yjixtsrmfu3Ubgko6SUeho/5jbiA1czijDLgsfWFBHV +dWNbFJWcHwHP2NVypw87 +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST EV Root CA 1 2020 O=D-Trust GmbH +# Subject: CN=D-TRUST EV Root CA 1 2020 O=D-Trust GmbH +# Label: "D-TRUST EV Root CA 1 2020" +# Serial: 126288379621884218666039612629459926992 +# MD5 Fingerprint: 8c:2d:9d:70:9f:48:99:11:06:11:fb:e9:cb:30:c0:6e +# SHA1 Fingerprint: 61:db:8c:21:59:69:03:90:d8:7c:9c:12:86:54:cf:9d:3d:f4:dd:07 +# SHA256 Fingerprint: 08:17:0d:1a:a3:64:53:90:1a:2f:95:92:45:e3:47:db:0c:8d:37:ab:aa:bc:56:b8:1a:a1:00:dc:95:89:70:db +-----BEGIN CERTIFICATE----- +MIIC2zCCAmCgAwIBAgIQXwJB13qHfEwDo6yWjfv/0DAKBggqhkjOPQQDAzBIMQsw +CQYDVQQGEwJERTEVMBMGA1UEChMMRC1UcnVzdCBHbWJIMSIwIAYDVQQDExlELVRS +VVNUIEVWIFJvb3QgQ0EgMSAyMDIwMB4XDTIwMDIxMTEwMDAwMFoXDTM1MDIxMTA5 +NTk1OVowSDELMAkGA1UEBhMCREUxFTATBgNVBAoTDEQtVHJ1c3QgR21iSDEiMCAG +A1UEAxMZRC1UUlVTVCBFViBSb290IENBIDEgMjAyMDB2MBAGByqGSM49AgEGBSuB +BAAiA2IABPEL3YZDIBnfl4XoIkqbz52Yv7QFJsnL46bSj8WeeHsxiamJrSc8ZRCC +/N/DnU7wMyPE0jL1HLDfMxddxfCxivnvubcUyilKwg+pf3VlSSowZ/Rk99Yad9rD +wpdhQntJraOCAQ0wggEJMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH8QARY3 +OqQo5FD4pPfsazK2/umLMA4GA1UdDwEB/wQEAwIBBjCBxgYDVR0fBIG+MIG7MD6g +PKA6hjhodHRwOi8vY3JsLmQtdHJ1c3QubmV0L2NybC9kLXRydXN0X2V2X3Jvb3Rf +Y2FfMV8yMDIwLmNybDB5oHegdYZzbGRhcDovL2RpcmVjdG9yeS5kLXRydXN0Lm5l +dC9DTj1ELVRSVVNUJTIwRVYlMjBSb290JTIwQ0ElMjAxJTIwMjAyMCxPPUQtVHJ1 +c3QlMjBHbWJILEM9REU/Y2VydGlmaWNhdGVyZXZvY2F0aW9ubGlzdDAKBggqhkjO +PQQDAwNpADBmAjEAyjzGKnXCXnViOTYAYFqLwZOZzNnbQTs7h5kXO9XMT8oi96CA 
+y/m0sRtW9XLS/BnRAjEAkfcwkz8QRitxpNA7RJvAKQIFskF3UfN5Wp6OFKBOQtJb +gfM0agPnIjhQW+0ZT0MW +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert TLS ECC P384 Root G5 O=DigiCert, Inc. +# Subject: CN=DigiCert TLS ECC P384 Root G5 O=DigiCert, Inc. +# Label: "DigiCert TLS ECC P384 Root G5" +# Serial: 13129116028163249804115411775095713523 +# MD5 Fingerprint: d3:71:04:6a:43:1c:db:a6:59:e1:a8:a3:aa:c5:71:ed +# SHA1 Fingerprint: 17:f3:de:5e:9f:0f:19:e9:8e:f6:1f:32:26:6e:20:c4:07:ae:30:ee +# SHA256 Fingerprint: 01:8e:13:f0:77:25:32:cf:80:9b:d1:b1:72:81:86:72:83:fc:48:c6:e1:3b:e9:c6:98:12:85:4a:49:0c:1b:05 +-----BEGIN CERTIFICATE----- +MIICGTCCAZ+gAwIBAgIQCeCTZaz32ci5PhwLBCou8zAKBggqhkjOPQQDAzBOMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xJjAkBgNVBAMTHURp +Z2lDZXJ0IFRMUyBFQ0MgUDM4NCBSb290IEc1MB4XDTIxMDExNTAwMDAwMFoXDTQ2 +MDExNDIzNTk1OVowTjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDkRpZ2lDZXJ0LCBJ +bmMuMSYwJAYDVQQDEx1EaWdpQ2VydCBUTFMgRUNDIFAzODQgUm9vdCBHNTB2MBAG +ByqGSM49AgEGBSuBBAAiA2IABMFEoc8Rl1Ca3iOCNQfN0MsYndLxf3c1TzvdlHJS +7cI7+Oz6e2tYIOyZrsn8aLN1udsJ7MgT9U7GCh1mMEy7H0cKPGEQQil8pQgO4CLp +0zVozptjn4S1mU1YoI71VOeVyaNCMEAwHQYDVR0OBBYEFMFRRVBZqz7nLFr6ICIS +B4CIfBFqMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MAoGCCqGSM49 +BAMDA2gAMGUCMQCJao1H5+z8blUD2WdsJk6Dxv3J+ysTvLd6jLRl0mlpYxNjOyZQ +LgGheQaRnUi/wr4CMEfDFXuxoJGZSZOoPHzoRgaLLPIxAJSdYsiJvRmEFOml+wG4 +DXZDjC5Ty3zfDBeWUA== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert TLS RSA4096 Root G5 O=DigiCert, Inc. +# Subject: CN=DigiCert TLS RSA4096 Root G5 O=DigiCert, Inc. +# Label: "DigiCert TLS RSA4096 Root G5" +# Serial: 11930366277458970227240571539258396554 +# MD5 Fingerprint: ac:fe:f7:34:96:a9:f2:b3:b4:12:4b:e4:27:41:6f:e1 +# SHA1 Fingerprint: a7:88:49:dc:5d:7c:75:8c:8c:de:39:98:56:b3:aa:d0:b2:a5:71:35 +# SHA256 Fingerprint: 37:1a:00:dc:05:33:b3:72:1a:7e:eb:40:e8:41:9e:70:79:9d:2b:0a:0f:2c:1d:80:69:31:65:f7:ce:c4:ad:75 +-----BEGIN CERTIFICATE----- +MIIFZjCCA06gAwIBAgIQCPm0eKj6ftpqMzeJ3nzPijANBgkqhkiG9w0BAQwFADBN +MQswCQYDVQQGEwJVUzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xJTAjBgNVBAMT +HERpZ2lDZXJ0IFRMUyBSU0E0MDk2IFJvb3QgRzUwHhcNMjEwMTE1MDAwMDAwWhcN +NDYwMTE0MjM1OTU5WjBNMQswCQYDVQQGEwJVUzEXMBUGA1UEChMORGlnaUNlcnQs +IEluYy4xJTAjBgNVBAMTHERpZ2lDZXJ0IFRMUyBSU0E0MDk2IFJvb3QgRzUwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCz0PTJeRGd/fxmgefM1eS87IE+ +ajWOLrfn3q/5B03PMJ3qCQuZvWxX2hhKuHisOjmopkisLnLlvevxGs3npAOpPxG0 +2C+JFvuUAT27L/gTBaF4HI4o4EXgg/RZG5Wzrn4DReW+wkL+7vI8toUTmDKdFqgp +wgscONyfMXdcvyej/Cestyu9dJsXLfKB2l2w4SMXPohKEiPQ6s+d3gMXsUJKoBZM +pG2T6T867jp8nVid9E6P/DsjyG244gXazOvswzH016cpVIDPRFtMbzCe88zdH5RD +nU1/cHAN1DrRN/BsnZvAFJNY781BOHW8EwOVfH/jXOnVDdXifBBiqmvwPXbzP6Po +sMH976pXTayGpxi0KcEsDr9kvimM2AItzVwv8n/vFfQMFawKsPHTDU9qTXeXAaDx +Zre3zu/O7Oyldcqs4+Fj97ihBMi8ez9dLRYiVu1ISf6nL3kwJZu6ay0/nTvEF+cd +Lvvyz6b84xQslpghjLSR6Rlgg/IwKwZzUNWYOwbpx4oMYIwo+FKbbuH2TbsGJJvX +KyY//SovcfXWJL5/MZ4PbeiPT02jP/816t9JXkGPhvnxd3lLG7SjXi/7RgLQZhNe +XoVPzthwiHvOAbWWl9fNff2C+MIkwcoBOU+NosEUQB+cZtUMCUbW8tDRSHZWOkPL +tgoRObqME2wGtZ7P6wIDAQABo0IwQDAdBgNVHQ4EFgQUUTMc7TZArxfTJc1paPKv +TiM+s0EwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcN +AQEMBQADggIBAGCmr1tfV9qJ20tQqcQjNSH/0GEwhJG3PxDPJY7Jv0Y02cEhJhxw +GXIeo8mH/qlDZJY6yFMECrZBu8RHANmfGBg7sg7zNOok992vIGCukihfNudd5N7H +PNtQOa27PShNlnx2xlv0wdsUpasZYgcYQF+Xkdycx6u1UQ3maVNVzDl92sURVXLF +O4uJ+DQtpBflF+aZfTCIITfNMBc9uPK8qHWgQ9w+iUuQrm0D4ByjoJYJu32jtyoQ +REtGBzRj7TG5BO6jm5qu5jF49OokYTurWGT/u4cnYiWB39yhL/btp/96j1EuMPik +AdKFOV8BmZZvWltwGUb+hmA+rYAQCd05JS9Yf7vSdPD3Rh9GOUrYU9DzLjtxpdRv 
+/PNn5AeP3SYZ4Y1b+qOTEZvpyDrDVWiakuFSdjjo4bq9+0/V77PnSIMx8IIh47a+ +p6tv75/fTM8BuGJqIz3nCU2AG3swpMPdB380vqQmsvZB6Akd4yCYqjdP//fx4ilw +MUc/dNAUFvohigLVigmUdy7yWSiLfFCSCmZ4OIN1xLVaqBHG5cGdZlXPU8Sv13WF +qUITVuwhd4GTWgzqltlJyqEI8pc7bZsEGCREjnwB8twl2F6GmrE52/WRMmrRpnCK +ovfepEWFJqgejF0pW8hL2JpqA15w8oVPbEtoL8pU9ozaMv7Da4M/OMZ+ +-----END CERTIFICATE----- + +# Issuer: CN=Certainly Root R1 O=Certainly +# Subject: CN=Certainly Root R1 O=Certainly +# Label: "Certainly Root R1" +# Serial: 188833316161142517227353805653483829216 +# MD5 Fingerprint: 07:70:d4:3e:82:87:a0:fa:33:36:13:f4:fa:33:e7:12 +# SHA1 Fingerprint: a0:50:ee:0f:28:71:f4:27:b2:12:6d:6f:50:96:25:ba:cc:86:42:af +# SHA256 Fingerprint: 77:b8:2c:d8:64:4c:43:05:f7:ac:c5:cb:15:6b:45:67:50:04:03:3d:51:c6:0c:62:02:a8:e0:c3:34:67:d3:a0 +-----BEGIN CERTIFICATE----- +MIIFRzCCAy+gAwIBAgIRAI4P+UuQcWhlM1T01EQ5t+AwDQYJKoZIhvcNAQELBQAw +PTELMAkGA1UEBhMCVVMxEjAQBgNVBAoTCUNlcnRhaW5seTEaMBgGA1UEAxMRQ2Vy +dGFpbmx5IFJvb3QgUjEwHhcNMjEwNDAxMDAwMDAwWhcNNDYwNDAxMDAwMDAwWjA9 +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJQ2VydGFpbmx5MRowGAYDVQQDExFDZXJ0 +YWlubHkgUm9vdCBSMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANA2 +1B/q3avk0bbm+yLA3RMNansiExyXPGhjZjKcA7WNpIGD2ngwEc/csiu+kr+O5MQT +vqRoTNoCaBZ0vrLdBORrKt03H2As2/X3oXyVtwxwhi7xOu9S98zTm/mLvg7fMbed +aFySpvXl8wo0tf97ouSHocavFwDvA5HtqRxOcT3Si2yJ9HiG5mpJoM610rCrm/b0 +1C7jcvk2xusVtyWMOvwlDbMicyF0yEqWYZL1LwsYpfSt4u5BvQF5+paMjRcCMLT5 +r3gajLQ2EBAHBXDQ9DGQilHFhiZ5shGIXsXwClTNSaa/ApzSRKft43jvRl5tcdF5 +cBxGX1HpyTfcX35pe0HfNEXgO4T0oYoKNp43zGJS4YkNKPl6I7ENPT2a/Z2B7yyQ +wHtETrtJ4A5KVpK8y7XdeReJkd5hiXSSqOMyhb5OhaRLWcsrxXiOcVTQAjeZjOVJ +6uBUcqQRBi8LjMFbvrWhsFNunLhgkR9Za/kt9JQKl7XsxXYDVBtlUrpMklZRNaBA +2CnbrlJ2Oy0wQJuK0EJWtLeIAaSHO1OWzaMWj/Nmqhexx2DgwUMFDO6bW2BvBlyH +Wyf5QBGenDPBt+U1VwV/J84XIIwc/PH72jEpSe31C4SnT8H2TsIonPru4K8H+zMR +eiFPCyEQtkA6qyI6BJyLm4SGcprSp6XEtHWRqSsjAgMBAAGjQjBAMA4GA1UdDwEB +/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTgqj8ljZ9EXME66C6u +d0yEPmcM9DANBgkqhkiG9w0BAQsFAAOCAgEAuVevuBLaV4OPaAszHQNTVfSVcOQr +PbA56/qJYv331hgELyE03fFo8NWWWt7CgKPBjcZq91l3rhVkz1t5BXdm6ozTaw3d +8VkswTOlMIAVRQdFGjEitpIAq5lNOo93r6kiyi9jyhXWx8bwPWz8HA2YEGGeEaIi +1wrykXprOQ4vMMM2SZ/g6Q8CRFA3lFV96p/2O7qUpUzpvD5RtOjKkjZUbVwlKNrd +rRT90+7iIgXr0PK3aBLXWopBGsaSpVo7Y0VPv+E6dyIvXL9G+VoDhRNCX8reU9di +taY1BMJH/5n9hN9czulegChB8n3nHpDYT3Y+gjwN/KUD+nsa2UUeYNrEjvn8K8l7 +lcUq/6qJ34IxD3L/DCfXCh5WAFAeDJDBlrXYFIW7pw0WwfgHJBu6haEaBQmAupVj +yTrsJZ9/nbqkRxWbRHDxakvWOF5D8xh+UG7pWijmZeZ3Gzr9Hb4DJqPb1OG7fpYn +Kx3upPvaJVQTA945xsMfTZDsjxtK0hzthZU4UHlG1sGQUDGpXJpuHfUzVounmdLy +yCwzk5Iwx06MZTMQZBf9JBeW0Y3COmor6xOLRPIh80oat3df1+2IpHLlOR+Vnb5n +wXARPbv0+Em34yaXOp/SX3z7wJl8OSngex2/DaeP0ik0biQVy96QXr8axGbqwua6 +OV+KmalBWQewLK8= +-----END CERTIFICATE----- + +# Issuer: CN=Certainly Root E1 O=Certainly +# Subject: CN=Certainly Root E1 O=Certainly +# Label: "Certainly Root E1" +# Serial: 8168531406727139161245376702891150584 +# MD5 Fingerprint: 0a:9e:ca:cd:3e:52:50:c6:36:f3:4b:a3:ed:a7:53:e9 +# SHA1 Fingerprint: f9:e1:6d:dc:01:89:cf:d5:82:45:63:3e:c5:37:7d:c2:eb:93:6f:2b +# SHA256 Fingerprint: b4:58:5f:22:e4:ac:75:6a:4e:86:12:a1:36:1c:5d:9d:03:1a:93:fd:84:fe:bb:77:8f:a3:06:8b:0f:c4:2d:c2 +-----BEGIN CERTIFICATE----- +MIIB9zCCAX2gAwIBAgIQBiUzsUcDMydc+Y2aub/M+DAKBggqhkjOPQQDAzA9MQsw +CQYDVQQGEwJVUzESMBAGA1UEChMJQ2VydGFpbmx5MRowGAYDVQQDExFDZXJ0YWlu +bHkgUm9vdCBFMTAeFw0yMTA0MDEwMDAwMDBaFw00NjA0MDEwMDAwMDBaMD0xCzAJ +BgNVBAYTAlVTMRIwEAYDVQQKEwlDZXJ0YWlubHkxGjAYBgNVBAMTEUNlcnRhaW5s +eSBSb290IEUxMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE3m/4fxzf7flHh4axpMCK 
++IKXgOqPyEpeKn2IaKcBYhSRJHpcnqMXfYqGITQYUBsQ3tA3SybHGWCA6TS9YBk2 +QNYphwk8kXr2vBMj3VlOBF7PyAIcGFPBMdjaIOlEjeR2o0IwQDAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU8ygYy2R17ikq6+2uI1g4 +hevIIgcwCgYIKoZIzj0EAwMDaAAwZQIxALGOWiDDshliTd6wT99u0nCK8Z9+aozm +ut6Dacpps6kFtZaSF4fC0urQe87YQVt8rgIwRt7qy12a7DLCZRawTDBcMPPaTnOG +BtjOiQRINzf43TNRnXCve1XYAS59BWQOhriR +-----END CERTIFICATE----- + +# Issuer: CN=E-Tugra Global Root CA RSA v3 O=E-Tugra EBG A.S. OU=E-Tugra Trust Center +# Subject: CN=E-Tugra Global Root CA RSA v3 O=E-Tugra EBG A.S. OU=E-Tugra Trust Center +# Label: "E-Tugra Global Root CA RSA v3" +# Serial: 75951268308633135324246244059508261641472512052 +# MD5 Fingerprint: 22:be:10:f6:c2:f8:03:88:73:5f:33:29:47:28:47:a4 +# SHA1 Fingerprint: e9:a8:5d:22:14:52:1c:5b:aa:0a:b4:be:24:6a:23:8a:c9:ba:e2:a9 +# SHA256 Fingerprint: ef:66:b0:b1:0a:3c:db:9f:2e:36:48:c7:6b:d2:af:18:ea:d2:bf:e6:f1:17:65:5e:28:c4:06:0d:a1:a3:f4:c2 +-----BEGIN CERTIFICATE----- +MIIF8zCCA9ugAwIBAgIUDU3FzRYilZYIfrgLfxUGNPt5EDQwDQYJKoZIhvcNAQEL +BQAwgYAxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHEwZBbmthcmExGTAXBgNVBAoTEEUt +VHVncmEgRUJHIEEuUy4xHTAbBgNVBAsTFEUtVHVncmEgVHJ1c3QgQ2VudGVyMSYw +JAYDVQQDEx1FLVR1Z3JhIEdsb2JhbCBSb290IENBIFJTQSB2MzAeFw0yMDAzMTgw +OTA3MTdaFw00NTAzMTIwOTA3MTdaMIGAMQswCQYDVQQGEwJUUjEPMA0GA1UEBxMG +QW5rYXJhMRkwFwYDVQQKExBFLVR1Z3JhIEVCRyBBLlMuMR0wGwYDVQQLExRFLVR1 +Z3JhIFRydXN0IENlbnRlcjEmMCQGA1UEAxMdRS1UdWdyYSBHbG9iYWwgUm9vdCBD +QSBSU0EgdjMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCiZvCJt3J7 +7gnJY9LTQ91ew6aEOErxjYG7FL1H6EAX8z3DeEVypi6Q3po61CBxyryfHUuXCscx +uj7X/iWpKo429NEvx7epXTPcMHD4QGxLsqYxYdE0PD0xesevxKenhOGXpOhL9hd8 +7jwH7eKKV9y2+/hDJVDqJ4GohryPUkqWOmAalrv9c/SF/YP9f4RtNGx/ardLAQO/ +rWm31zLZ9Vdq6YaCPqVmMbMWPcLzJmAy01IesGykNz709a/r4d+ABs8qQedmCeFL +l+d3vSFtKbZnwy1+7dZ5ZdHPOrbRsV5WYVB6Ws5OUDGAA5hH5+QYfERaxqSzO8bG +wzrwbMOLyKSRBfP12baqBqG3q+Sx6iEUXIOk/P+2UNOMEiaZdnDpwA+mdPy70Bt4 +znKS4iicvObpCdg604nmvi533wEKb5b25Y08TVJ2Glbhc34XrD2tbKNSEhhw5oBO +M/J+JjKsBY04pOZ2PJ8QaQ5tndLBeSBrW88zjdGUdjXnXVXHt6woq0bM5zshtQoK +5EpZ3IE1S0SVEgpnpaH/WwAH0sDM+T/8nzPyAPiMbIedBi3x7+PmBvrFZhNb/FAH +nnGGstpvdDDPk1Po3CLW3iAfYY2jLqN4MpBs3KwytQXk9TwzDdbgh3cXTJ2w2Amo +DVf3RIXwyAS+XF1a4xeOVGNpf0l0ZAWMowIDAQABo2MwYTAPBgNVHRMBAf8EBTAD +AQH/MB8GA1UdIwQYMBaAFLK0ruYt9ybVqnUtdkvAG1Mh0EjvMB0GA1UdDgQWBBSy +tK7mLfcm1ap1LXZLwBtTIdBI7zAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEL +BQADggIBAImocn+M684uGMQQgC0QDP/7FM0E4BQ8Tpr7nym/Ip5XuYJzEmMmtcyQ +6dIqKe6cLcwsmb5FJ+Sxce3kOJUxQfJ9emN438o2Fi+CiJ+8EUdPdk3ILY7r3y18 +Tjvarvbj2l0Upq7ohUSdBm6O++96SmotKygY/r+QLHUWnw/qln0F7psTpURs+APQ +3SPh/QMSEgj0GDSz4DcLdxEBSL9htLX4GdnLTeqjjO/98Aa1bZL0SmFQhO3sSdPk +vmjmLuMxC1QLGpLWgti2omU8ZgT5Vdps+9u1FGZNlIM7zR6mK7L+d0CGq+ffCsn9 +9t2HVhjYsCxVYJb6CH5SkPVLpi6HfMsg2wY+oF0Dd32iPBMbKaITVaA9FCKvb7jQ +mhty3QUBjYZgv6Rn7rWlDdF/5horYmbDB7rnoEgcOMPpRfunf/ztAmgayncSd6YA +VSgU7NbHEqIbZULpkejLPoeJVF3Zr52XnGnnCv8PWniLYypMfUeUP95L6VPQMPHF +9p5J3zugkaOj/s1YzOrfr28oO6Bpm4/srK4rVJ2bBLFHIK+WEj5jlB0E5y67hscM +moi/dkfv97ALl2bSRM9gUgfh1SxKOidhd8rXj+eHDjD/DLsE4mHDosiXYY60MGo8 +bcIHX0pzLz/5FooBZu+6kcpSV3uu1OYP3Qt6f4ueJiDPO++BcYNZ +-----END CERTIFICATE----- + +# Issuer: CN=E-Tugra Global Root CA ECC v3 O=E-Tugra EBG A.S. OU=E-Tugra Trust Center +# Subject: CN=E-Tugra Global Root CA ECC v3 O=E-Tugra EBG A.S. 
OU=E-Tugra Trust Center +# Label: "E-Tugra Global Root CA ECC v3" +# Serial: 218504919822255052842371958738296604628416471745 +# MD5 Fingerprint: 46:bc:81:bb:f1:b5:1e:f7:4b:96:bc:14:e2:e7:27:64 +# SHA1 Fingerprint: 8a:2f:af:57:53:b1:b0:e6:a1:04:ec:5b:6a:69:71:6d:f6:1c:e2:84 +# SHA256 Fingerprint: 87:3f:46:85:fa:7f:56:36:25:25:2e:6d:36:bc:d7:f1:6f:c2:49:51:f2:64:e4:7e:1b:95:4f:49:08:cd:ca:13 +-----BEGIN CERTIFICATE----- +MIICpTCCAiqgAwIBAgIUJkYZdzHhT28oNt45UYbm1JeIIsEwCgYIKoZIzj0EAwMw +gYAxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHEwZBbmthcmExGTAXBgNVBAoTEEUtVHVn +cmEgRUJHIEEuUy4xHTAbBgNVBAsTFEUtVHVncmEgVHJ1c3QgQ2VudGVyMSYwJAYD +VQQDEx1FLVR1Z3JhIEdsb2JhbCBSb290IENBIEVDQyB2MzAeFw0yMDAzMTgwOTQ2 +NThaFw00NTAzMTIwOTQ2NThaMIGAMQswCQYDVQQGEwJUUjEPMA0GA1UEBxMGQW5r +YXJhMRkwFwYDVQQKExBFLVR1Z3JhIEVCRyBBLlMuMR0wGwYDVQQLExRFLVR1Z3Jh +IFRydXN0IENlbnRlcjEmMCQGA1UEAxMdRS1UdWdyYSBHbG9iYWwgUm9vdCBDQSBF +Q0MgdjMwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASOmCm/xxAeJ9urA8woLNheSBkQ +KczLWYHMjLiSF4mDKpL2w6QdTGLVn9agRtwcvHbB40fQWxPa56WzZkjnIZpKT4YK +fWzqTTKACrJ6CZtpS5iB4i7sAnCWH/31Rs7K3IKjYzBhMA8GA1UdEwEB/wQFMAMB +Af8wHwYDVR0jBBgwFoAU/4Ixcj75xGZsrTie0bBRiKWQzPUwHQYDVR0OBBYEFP+C +MXI++cRmbK04ntGwUYilkMz1MA4GA1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNp +ADBmAjEA5gVYaWHlLcoNy/EZCL3W/VGSGn5jVASQkZo1kTmZ+gepZpO6yGjUij/6 +7W4WAie3AjEA3VoXK3YdZUKWpqxdinlW2Iob35reX8dQj7FbcQwm32pAAOwzkSFx +vmjkI6TZraE3 +-----END CERTIFICATE----- + +# Issuer: CN=Security Communication RootCA3 O=SECOM Trust Systems CO.,LTD. +# Subject: CN=Security Communication RootCA3 O=SECOM Trust Systems CO.,LTD. +# Label: "Security Communication RootCA3" +# Serial: 16247922307909811815 +# MD5 Fingerprint: 1c:9a:16:ff:9e:5c:e0:4d:8a:14:01:f4:35:5d:29:26 +# SHA1 Fingerprint: c3:03:c8:22:74:92:e5:61:a2:9c:5f:79:91:2b:1e:44:13:91:30:3a +# SHA256 Fingerprint: 24:a5:5c:2a:b0:51:44:2d:06:17:76:65:41:23:9a:4a:d0:32:d7:c5:51:75:aa:34:ff:de:2f:bc:4f:5c:52:94 +-----BEGIN CERTIFICATE----- +MIIFfzCCA2egAwIBAgIJAOF8N0D9G/5nMA0GCSqGSIb3DQEBDAUAMF0xCzAJBgNV +BAYTAkpQMSUwIwYDVQQKExxTRUNPTSBUcnVzdCBTeXN0ZW1zIENPLixMVEQuMScw +JQYDVQQDEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTMwHhcNMTYwNjE2 +MDYxNzE2WhcNMzgwMTE4MDYxNzE2WjBdMQswCQYDVQQGEwJKUDElMCMGA1UEChMc +U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UEAxMeU2VjdXJpdHkg +Q29tbXVuaWNhdGlvbiBSb290Q0EzMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC +CgKCAgEA48lySfcw3gl8qUCBWNO0Ot26YQ+TUG5pPDXC7ltzkBtnTCHsXzW7OT4r +CmDvu20rhvtxosis5FaU+cmvsXLUIKx00rgVrVH+hXShuRD+BYD5UpOzQD11EKzA +lrenfna84xtSGc4RHwsENPXY9Wk8d/Nk9A2qhd7gCVAEF5aEt8iKvE1y/By7z/MG +TfmfZPd+pmaGNXHIEYBMwXFAWB6+oHP2/D5Q4eAvJj1+XCO1eXDe+uDRpdYMQXF7 +9+qMHIjH7Iv10S9VlkZ8WjtYO/u62C21Jdp6Ts9EriGmnpjKIG58u4iFW/vAEGK7 +8vknR+/RiTlDxN/e4UG/VHMgly1s2vPUB6PmudhvrvyMGS7TZ2crldtYXLVqAvO4 +g160a75BflcJdURQVc1aEWEhCmHCqYj9E7wtiS/NYeCVvsq1e+F7NGcLH7YMx3we +GVPKp7FKFSBWFHA9K4IsD50VHUeAR/94mQ4xr28+j+2GaR57GIgUssL8gjMunEst ++3A7caoreyYn8xrC3PsXuKHqy6C0rtOUfnrQq8PsOC0RLoi/1D+tEjtCrI8Cbn3M +0V9hvqG8OmpI6iZVIhZdXw3/JzOfGAN0iltSIEdrRU0id4xVJ/CvHozJgyJUt5rQ +T9nO/NkuHJYosQLTA70lUhw0Zk8jq/R3gpYd0VcwCBEF/VfR2ccCAwEAAaNCMEAw +HQYDVR0OBBYEFGQUfPxYchamCik0FW8qy7z8r6irMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBDAUAA4ICAQDcAiMI4u8hOscNtybS +YpOnpSNyByCCYN8Y11StaSWSntkUz5m5UoHPrmyKO1o5yGwBQ8IibQLwYs1OY0PA +FNr0Y/Dq9HHuTofjcan0yVflLl8cebsjqodEV+m9NU1Bu0soo5iyG9kLFwfl9+qd +9XbXv8S2gVj/yP9kaWJ5rW4OH3/uHWnlt3Jxs/6lATWUVCvAUm2PVcTJ0rjLyjQI +UYWg9by0F1jqClx6vWPGOi//lkkZhOpn2ASxYfQAW0q3nHE3GYV5v4GwxxMOdnE+ +OoAGrgYWp421wsTL/0ClXI2lyTrtcoHKXJg80jQDdwj98ClZXSEIx2C/pHF7uNke 
+gr4Jr2VvKKu/S7XuPghHJ6APbw+LP6yVGPO5DtxnVW5inkYO0QR4ynKudtml+LLf +iAlhi+8kTtFZP1rUPcmTPCtk9YENFpb3ksP+MW/oKjJ0DvRMmEoYDjBU1cXrvMUV +nuiZIesnKwkK2/HmcBhWuwzkvvnoEKQTkrgc4NtnHVMDpCKn3F2SEDzq//wbEBrD +2NCcnWXL0CsnMQMeNuE9dnUM/0Umud1RvCPHX9jYhxBAEg09ODfnRDwYwFMJZI// +1ZqmfHAuc1Uh6N//g7kdPjIe1qZ9LPFm6Vwdp6POXiUyK+OVrCoHzrQoeIY8Laad +TdJ0MN1kURXbg4NR16/9M51NZg== +-----END CERTIFICATE----- + +# Issuer: CN=Security Communication ECC RootCA1 O=SECOM Trust Systems CO.,LTD. +# Subject: CN=Security Communication ECC RootCA1 O=SECOM Trust Systems CO.,LTD. +# Label: "Security Communication ECC RootCA1" +# Serial: 15446673492073852651 +# MD5 Fingerprint: 7e:43:b0:92:68:ec:05:43:4c:98:ab:5d:35:2e:7e:86 +# SHA1 Fingerprint: b8:0e:26:a9:bf:d2:b2:3b:c0:ef:46:c9:ba:c7:bb:f6:1d:0d:41:41 +# SHA256 Fingerprint: e7:4f:bd:a5:5b:d5:64:c4:73:a3:6b:44:1a:a7:99:c8:a6:8e:07:74:40:e8:28:8b:9f:a1:e5:0e:4b:ba:ca:11 +-----BEGIN CERTIFICATE----- +MIICODCCAb6gAwIBAgIJANZdm7N4gS7rMAoGCCqGSM49BAMDMGExCzAJBgNVBAYT +AkpQMSUwIwYDVQQKExxTRUNPTSBUcnVzdCBTeXN0ZW1zIENPLixMVEQuMSswKQYD +VQQDEyJTZWN1cml0eSBDb21tdW5pY2F0aW9uIEVDQyBSb290Q0ExMB4XDTE2MDYx +NjA1MTUyOFoXDTM4MDExODA1MTUyOFowYTELMAkGA1UEBhMCSlAxJTAjBgNVBAoT +HFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xKzApBgNVBAMTIlNlY3VyaXR5 +IENvbW11bmljYXRpb24gRUNDIFJvb3RDQTEwdjAQBgcqhkjOPQIBBgUrgQQAIgNi +AASkpW9gAwPDvTH00xecK4R1rOX9PVdu12O/5gSJko6BnOPpR27KkBLIE+Cnnfdl +dB9sELLo5OnvbYUymUSxXv3MdhDYW72ixvnWQuRXdtyQwjWpS4g8EkdtXP9JTxpK +ULGjQjBAMB0GA1UdDgQWBBSGHOf+LaVKiwj+KBH6vqNm+GBZLzAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjAVXUI9/Lbu +9zuxNuie9sRGKEkz0FhDKmMpzE2xtHqiuQ04pV1IKv3LsnNdo4gIxwwCMQDAqy0O +be0YottT6SXbVQjgUMzfRGEWgqtJsLKB7HOHeLRMsmIbEvoWTSVLY70eN9k= +-----END CERTIFICATE----- + +# Issuer: CN=BJCA Global Root CA1 O=BEIJING CERTIFICATE AUTHORITY +# Subject: CN=BJCA Global Root CA1 O=BEIJING CERTIFICATE AUTHORITY +# Label: "BJCA Global Root CA1" +# Serial: 113562791157148395269083148143378328608 +# MD5 Fingerprint: 42:32:99:76:43:33:36:24:35:07:82:9b:28:f9:d0:90 +# SHA1 Fingerprint: d5:ec:8d:7b:4c:ba:79:f4:e7:e8:cb:9d:6b:ae:77:83:10:03:21:6a +# SHA256 Fingerprint: f3:89:6f:88:fe:7c:0a:88:27:66:a7:fa:6a:d2:74:9f:b5:7a:7f:3e:98:fb:76:9c:1f:a7:b0:9c:2c:44:d5:ae +-----BEGIN CERTIFICATE----- +MIIFdDCCA1ygAwIBAgIQVW9l47TZkGobCdFsPsBsIDANBgkqhkiG9w0BAQsFADBU +MQswCQYDVQQGEwJDTjEmMCQGA1UECgwdQkVJSklORyBDRVJUSUZJQ0FURSBBVVRI +T1JJVFkxHTAbBgNVBAMMFEJKQ0EgR2xvYmFsIFJvb3QgQ0ExMB4XDTE5MTIxOTAz +MTYxN1oXDTQ0MTIxMjAzMTYxN1owVDELMAkGA1UEBhMCQ04xJjAkBgNVBAoMHUJF +SUpJTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZMR0wGwYDVQQDDBRCSkNBIEdsb2Jh +bCBSb290IENBMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAPFmCL3Z +xRVhy4QEQaVpN3cdwbB7+sN3SJATcmTRuHyQNZ0YeYjjlwE8R4HyDqKYDZ4/N+AZ +spDyRhySsTphzvq3Rp4Dhtczbu33RYx2N95ulpH3134rhxfVizXuhJFyV9xgw8O5 +58dnJCNPYwpj9mZ9S1WnP3hkSWkSl+BMDdMJoDIwOvqfwPKcxRIqLhy1BDPapDgR +at7GGPZHOiJBhyL8xIkoVNiMpTAK+BcWyqw3/XmnkRd4OJmtWO2y3syJfQOcs4ll +5+M7sSKGjwZteAf9kRJ/sGsciQ35uMt0WwfCyPQ10WRjeulumijWML3mG90Vr4Tq +nMfK9Q7q8l0ph49pczm+LiRvRSGsxdRpJQaDrXpIhRMsDQa4bHlW/KNnMoH1V6XK +V0Jp6VwkYe/iMBhORJhVb3rCk9gZtt58R4oRTklH2yiUAguUSiz5EtBP6DF+bHq/ +pj+bOT0CFqMYs2esWz8sgytnOYFcuX6U1WTdno9uruh8W7TXakdI136z1C2OVnZO +z2nxbkRs1CTqjSShGL+9V/6pmTW12xB3uD1IutbB5/EjPtffhZ0nPNRAvQoMvfXn +jSXWgXSHRtQpdaJCbPdzied9v3pKH9MiyRVVz99vfFXQpIsHETdfg6YmV6YBW37+ +WGgHqel62bno/1Afq8K0wM7o6v0PvY1NuLxxAgMBAAGjQjBAMB0GA1UdDgQWBBTF +7+3M2I0hxkjk49cULqcWk+WYATAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQE +AwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAUoKsITQfI/Ki2Pm4rzc2IInRNwPWaZ+4 
+YRC6ojGYWUfo0Q0lHhVBDOAqVdVXUsv45Mdpox1NcQJeXyFFYEhcCY5JEMEE3Kli +awLwQ8hOnThJdMkycFRtwUf8jrQ2ntScvd0g1lPJGKm1Vrl2i5VnZu69mP6u775u ++2D2/VnGKhs/I0qUJDAnyIm860Qkmss9vk/Ves6OF8tiwdneHg56/0OGNFK8YT88 +X7vZdrRTvJez/opMEi4r89fO4aL/3Xtw+zuhTaRjAv04l5U/BXCga99igUOLtFkN +SoxUnMW7gZ/NfaXvCyUeOiDbHPwfmGcCCtRzRBPbUYQaVQNW4AB+dAb/OMRyHdOo +P2gxXdMJxy6MW2Pg6Nwe0uxhHvLe5e/2mXZgLR6UcnHGCyoyx5JO1UbXHfmpGQrI ++pXObSOYqgs4rZpWDW+N8TEAiMEXnM0ZNjX+VVOg4DwzX5Ze4jLp3zO7Bkqp2IRz +znfSxqxx4VyjHQy7Ct9f4qNx2No3WqB4K/TUfet27fJhcKVlmtOJNBir+3I+17Q9 +eVzYH6Eze9mCUAyTF6ps3MKCuwJXNq+YJyo5UOGwifUll35HaBC07HPKs5fRJNz2 +YqAo07WjuGS3iGJCz51TzZm+ZGiPTx4SSPfSKcOYKMryMguTjClPPGAyzQWWYezy +r/6zcCwupvI= +-----END CERTIFICATE----- + +# Issuer: CN=BJCA Global Root CA2 O=BEIJING CERTIFICATE AUTHORITY +# Subject: CN=BJCA Global Root CA2 O=BEIJING CERTIFICATE AUTHORITY +# Label: "BJCA Global Root CA2" +# Serial: 58605626836079930195615843123109055211 +# MD5 Fingerprint: 5e:0a:f6:47:5f:a6:14:e8:11:01:95:3f:4d:01:eb:3c +# SHA1 Fingerprint: f4:27:86:eb:6e:b8:6d:88:31:67:02:fb:ba:66:a4:53:00:aa:7a:a6 +# SHA256 Fingerprint: 57:4d:f6:93:1e:27:80:39:66:7b:72:0a:fd:c1:60:0f:c2:7e:b6:6d:d3:09:29:79:fb:73:85:64:87:21:28:82 +-----BEGIN CERTIFICATE----- +MIICJTCCAaugAwIBAgIQLBcIfWQqwP6FGFkGz7RK6zAKBggqhkjOPQQDAzBUMQsw +CQYDVQQGEwJDTjEmMCQGA1UECgwdQkVJSklORyBDRVJUSUZJQ0FURSBBVVRIT1JJ +VFkxHTAbBgNVBAMMFEJKQ0EgR2xvYmFsIFJvb3QgQ0EyMB4XDTE5MTIxOTAzMTgy +MVoXDTQ0MTIxMjAzMTgyMVowVDELMAkGA1UEBhMCQ04xJjAkBgNVBAoMHUJFSUpJ +TkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZMR0wGwYDVQQDDBRCSkNBIEdsb2JhbCBS +b290IENBMjB2MBAGByqGSM49AgEGBSuBBAAiA2IABJ3LgJGNU2e1uVCxA/jlSR9B +IgmwUVJY1is0j8USRhTFiy8shP8sbqjV8QnjAyEUxEM9fMEsxEtqSs3ph+B99iK+ ++kpRuDCK/eHeGBIK9ke35xe/J4rUQUyWPGCWwf0VHKNCMEAwHQYDVR0OBBYEFNJK +sVF/BvDRgh9Obl+rg/xI1LCRMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD +AgEGMAoGCCqGSM49BAMDA2gAMGUCMBq8W9f+qdJUDkpd0m2xQNz0Q9XSSpkZElaA +94M04TVOSG0ED1cxMDAtsaqdAzjbBgIxAMvMh1PLet8gUXOQwKhbYdDFUDn9hf7B +43j4ptZLvZuHjw/l1lOWqzzIQNph91Oj9w== +-----END CERTIFICATE----- diff --git a/env/Lib/site-packages/pip/_vendor/certifi/core.py b/env/Lib/site-packages/pip/_vendor/certifi/core.py index b8140cf1ae7cd6d84a484668608ec6226db20e37..c3e546604c85678dd72db35893c46ffe2d79c052 100644 --- a/env/Lib/site-packages/pip/_vendor/certifi/core.py +++ b/env/Lib/site-packages/pip/_vendor/certifi/core.py @@ -1,34 +1,20 @@ -# -*- coding: utf-8 -*- - """ certifi.py ~~~~~~~~~~ This module returns the installation location of cacert.pem or its contents. """ -import os - +import sys -class _PipPatchedCertificate(Exception): - pass +if sys.version_info >= (3, 11): -try: - # Return a certificate file on disk for a standalone pip zipapp running in - # an isolated build environment to use. Passing --cert to the standalone - # pip does not work since requests calls where() unconditionally on import. 
- _PIP_STANDALONE_CERT = os.environ.get("_PIP_STANDALONE_CERT") - if _PIP_STANDALONE_CERT: - def where(): - return _PIP_STANDALONE_CERT - raise _PipPatchedCertificate() - - from importlib.resources import path as get_path, read_text + from importlib.resources import as_file, files _CACERT_CTX = None _CACERT_PATH = None - def where(): + def where() -> str: # This is slightly terrible, but we want to delay extracting the file # in cases where we're inside of a zipimport situation until someone # actually calls where(), but we don't want to re-extract the file @@ -47,30 +33,76 @@ try: # We also have to hold onto the actual context manager, because # it will do the cleanup whenever it gets garbage collected, so # we will also store that at the global level as well. + _CACERT_CTX = as_file(files("pip._vendor.certifi").joinpath("cacert.pem")) + _CACERT_PATH = str(_CACERT_CTX.__enter__()) + + return _CACERT_PATH + + def contents() -> str: + return files("pip._vendor.certifi").joinpath("cacert.pem").read_text(encoding="ascii") + +elif sys.version_info >= (3, 7): + + from importlib.resources import path as get_path, read_text + + _CACERT_CTX = None + _CACERT_PATH = None + + def where() -> str: + # This is slightly terrible, but we want to delay extracting the + # file in cases where we're inside of a zipimport situation until + # someone actually calls where(), but we don't want to re-extract + # the file on every call of where(), so we'll do it once then store + # it in a global variable. + global _CACERT_CTX + global _CACERT_PATH + if _CACERT_PATH is None: + # This is slightly janky, the importlib.resources API wants you + # to manage the cleanup of this file, so it doesn't actually + # return a path, it returns a context manager that will give + # you the path when you enter it and will do any cleanup when + # you leave it. In the common case of not needing a temporary + # file, it will just return the file system location and the + # __exit__() is a no-op. + # + # We also have to hold onto the actual context manager, because + # it will do the cleanup whenever it gets garbage collected, so + # we will also store that at the global level as well. _CACERT_CTX = get_path("pip._vendor.certifi", "cacert.pem") _CACERT_PATH = str(_CACERT_CTX.__enter__()) return _CACERT_PATH -except _PipPatchedCertificate: - pass + def contents() -> str: + return read_text("pip._vendor.certifi", "cacert.pem", encoding="ascii") + +else: + import os + import types + from typing import Union + + Package = Union[types.ModuleType, str] + Resource = Union[str, "os.PathLike"] -except ImportError: # This fallback will work for Python versions prior to 3.7 that lack the # importlib.resources module but relies on the existing `where` function # so won't address issues with environments like PyOxidizer that don't set # __file__ on modules. - def read_text(_module, _path, encoding="ascii"): - with open(where(), "r", encoding=encoding) as data: + def read_text( + package: Package, + resource: Resource, + encoding: str = 'utf-8', + errors: str = 'strict' + ) -> str: + with open(where(), encoding=encoding) as data: return data.read() # If we don't have importlib.resources, then we will just do the old logic # of assuming we're on the filesystem and munge the path directly. 
- def where():
+ def where() -> str:
f = os.path.dirname(__file__)
return os.path.join(f, "cacert.pem")
-
-def contents():
- return read_text("certifi", "cacert.pem", encoding="ascii")
+ def contents() -> str:
+ return read_text("pip._vendor.certifi", "cacert.pem", encoding="ascii")
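For orientation, a minimal sketch of how this vendored certifi module is typically consumed: where() returns a filesystem path to the bundled cacert.pem (extracting it to a temporary file first when pip runs from a zip), while contents() returns the PEM text directly. The import path follows the patch above; the printed values are illustrative only.

    from pip._vendor.certifi import contents, where

    ca_path = where()   # e.g. .../pip/_vendor/certifi/cacert.pem
    pem = contents()    # the whole bundle as ASCII text
    print(ca_path, pem.count("BEGIN CERTIFICATE"), "roots")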
diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__init__.py b/env/Lib/site-packages/pip/_vendor/chardet/__init__.py
index 80ad2546d7981394b5f5d221336c9f00236b9d66..fe581623d89d67a49eb43f3c3e88f3f450257707 100644
--- a/env/Lib/site-packages/pip/_vendor/chardet/__init__.py
+++ b/env/Lib/site-packages/pip/_vendor/chardet/__init__.py
@@ -15,69 +15,101 @@
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
+from typing import List, Union
-from .universaldetector import UniversalDetector
+from .charsetgroupprober import CharSetGroupProber
+from .charsetprober import CharSetProber
from .enums import InputState
-from .version import __version__, VERSION
-
+from .resultdict import ResultDict
+from .universaldetector import UniversalDetector
+from .version import VERSION, __version__
-__all__ = ['UniversalDetector', 'detect', 'detect_all', '__version__', 'VERSION']
+__all__ = ["UniversalDetector", "detect", "detect_all", "__version__", "VERSION"]
-def detect(byte_str):
+def detect(
+ byte_str: Union[bytes, bytearray], should_rename_legacy: bool = False
+) -> ResultDict:
""" Detect the encoding of the given byte string.
:param byte_str: The byte sequence to examine.
:type byte_str: ``bytes`` or ``bytearray``
+ :param should_rename_legacy: Should we rename legacy encodings
+ to their more modern equivalents?
+ :type should_rename_legacy: ``bool``
"""
if not isinstance(byte_str, bytearray):
if not isinstance(byte_str, bytes):
- raise TypeError('Expected object of type bytes or bytearray, got: '
- '{}'.format(type(byte_str)))
- else:
- byte_str = bytearray(byte_str)
- detector = UniversalDetector()
+ raise TypeError(
+ f"Expected object of type bytes or bytearray, got: {type(byte_str)}"
+ )
+ byte_str = bytearray(byte_str)
+ detector = UniversalDetector(should_rename_legacy=should_rename_legacy)
detector.feed(byte_str)
return detector.close()
-def detect_all(byte_str):
+def detect_all(
+ byte_str: Union[bytes, bytearray],
+ ignore_threshold: bool = False,
+ should_rename_legacy: bool = False,
+) -> List[ResultDict]:
""" Detect all the possible encodings of the given byte string.
- :param byte_str: The byte sequence to examine.
- :type byte_str: ``bytes`` or ``bytearray``
+ :param byte_str: The byte sequence to examine.
+ :type byte_str: ``bytes`` or ``bytearray``
+ :param ignore_threshold: Include encodings that are below
+ ``UniversalDetector.MINIMUM_THRESHOLD``
+ in results.
+ :type ignore_threshold: ``bool``
+ :param should_rename_legacy: Should we rename legacy encodings
+ to their more modern equivalents?
+ :type should_rename_legacy: ``bool``
"""
if not isinstance(byte_str, bytearray):
if not isinstance(byte_str, bytes):
- raise TypeError('Expected object of type bytes or bytearray, got: '
- '{}'.format(type(byte_str)))
- else:
- byte_str = bytearray(byte_str)
+ raise TypeError(
+ f"Expected object of type bytes or bytearray, got: {type(byte_str)}"
+ )
+ byte_str = bytearray(byte_str)
- detector = UniversalDetector()
+ detector = UniversalDetector(should_rename_legacy=should_rename_legacy)
detector.feed(byte_str)
detector.close()
- if detector._input_state == InputState.HIGH_BYTE:
- results = []
- for prober in detector._charset_probers:
- if prober.get_confidence() > detector.MINIMUM_THRESHOLD:
- charset_name = prober.charset_name
- lower_charset_name = prober.charset_name.lower()
+ if detector.input_state == InputState.HIGH_BYTE:
+ results: List[ResultDict] = []
+ probers: List[CharSetProber] = []
+ for prober in detector.charset_probers:
+ if isinstance(prober, CharSetGroupProber):
+ probers.extend(p for p in prober.probers)
+ else:
+ probers.append(prober)
+ for prober in probers:
+ if ignore_threshold or prober.get_confidence() > detector.MINIMUM_THRESHOLD:
+ charset_name = prober.charset_name or ""
+ lower_charset_name = charset_name.lower()
# Use Windows encoding name instead of ISO-8859 if we saw any
# extra Windows-specific bytes
- if lower_charset_name.startswith('iso-8859'):
- if detector._has_win_bytes:
- charset_name = detector.ISO_WIN_MAP.get(lower_charset_name,
- charset_name)
- results.append({
- 'encoding': charset_name,
- 'confidence': prober.get_confidence(),
- 'language': prober.language,
- })
+ if lower_charset_name.startswith("iso-8859") and detector.has_win_bytes:
+ charset_name = detector.ISO_WIN_MAP.get(
+ lower_charset_name, charset_name
+ )
+ # Rename legacy encodings with superset encodings if asked
+ if should_rename_legacy:
+ charset_name = detector.LEGACY_MAP.get(
+ charset_name.lower(), charset_name
+ )
+ results.append(
+ {
+ "encoding": charset_name,
+ "confidence": prober.get_confidence(),
+ "language": prober.language,
+ }
+ )
if len(results) > 0:
- return sorted(results, key=lambda result: -result['confidence'])
+ return sorted(results, key=lambda result: -result["confidence"])
return [detector.result]
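A usage sketch for the two entry points patched above; the sample bytes and the exact result values are illustrative only:

    from pip._vendor.chardet import detect, detect_all

    data = "こんにちは".encode("shift_jis")
    print(detect(data))
    # e.g. {'encoding': 'SHIFT_JIS', 'confidence': 0.99, 'language': 'Japanese'}
    print(detect_all(data, ignore_threshold=True))
    # every prober's guess, low-confidence ones included, sorted best-first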
diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 242690e3905fc3d2bea64f9fed1f46a5a0a00a9d..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/__init__.cpython-39.pyc and /dev/null differ
diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/big5freq.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/big5freq.cpython-39.pyc deleted file mode 100644 index 22e5a3a5b405fb49e0ecd07504788eb717f34775..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/big5freq.cpython-39.pyc and /dev/null differ
diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/big5prober.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/big5prober.cpython-39.pyc deleted file mode 100644 index 908c06840f9601d6728b3f573ba3eeeafc3e19ca..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/big5prober.cpython-39.pyc and /dev/null differ
diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/chardistribution.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/chardistribution.cpython-39.pyc deleted file mode 100644 index 9989d318ab19c9f0fcddb06abe28dfb173338e4c..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/chardistribution.cpython-39.pyc and /dev/null differ
diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/charsetgroupprober.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/charsetgroupprober.cpython-39.pyc deleted file mode 100644 index 03d093d9005783303a25d07060ac0d6458c6a480..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/charsetgroupprober.cpython-39.pyc and /dev/null differ
diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/charsetprober.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/charsetprober.cpython-39.pyc deleted file mode 100644 index 1c50be04f05e5e4f40fe090c5e70ac24ac090267..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/charsetprober.cpython-39.pyc and /dev/null differ
diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/codingstatemachine.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/codingstatemachine.cpython-39.pyc deleted file mode 100644 index 73c3104c8aa11cf93fbf43d878fb193edb95a7cd..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/codingstatemachine.cpython-39.pyc and /dev/null differ
diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/compat.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/compat.cpython-39.pyc deleted file mode 100644 index aa6765a02e38059e3e96530c3401f8a2bb91bf79..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/compat.cpython-39.pyc and /dev/null differ
diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/cp949prober.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/cp949prober.cpython-39.pyc deleted file mode 100644 index 692bdcf85197f2c73c3b4f755b64164563a32aae..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/cp949prober.cpython-39.pyc and /dev/null differ
diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/enums.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/enums.cpython-39.pyc deleted file mode 100644 index 8ed9d05f14db8ab8feed48ad39c53b63daca2f7c..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/enums.cpython-39.pyc and /dev/null differ
diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/escprober.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/escprober.cpython-39.pyc deleted file mode 100644 index 482622ef89f18580c9b0e89515439e2b6d1ef4ea..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/escprober.cpython-39.pyc and /dev/null differ
diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/escsm.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/escsm.cpython-39.pyc deleted file mode 100644 index
7ab2666fb5a0482417f6bcb912a5958ded1428ab..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/escsm.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/eucjpprober.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/eucjpprober.cpython-39.pyc deleted file mode 100644 index 66a43465cfa72e37b4ea0c7c5aa25508281a7ea8..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/eucjpprober.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/euckrfreq.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/euckrfreq.cpython-39.pyc deleted file mode 100644 index 73093afbbc99962912c07836236efb4ba88109d4..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/euckrfreq.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/euckrprober.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/euckrprober.cpython-39.pyc deleted file mode 100644 index bba6aabf201ccfced52dab02e77fc50e5fff7484..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/euckrprober.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/euctwfreq.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/euctwfreq.cpython-39.pyc deleted file mode 100644 index 1c4f7f75f2ddff3142de5583b5879734e5d31dfe..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/euctwfreq.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/euctwprober.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/euctwprober.cpython-39.pyc deleted file mode 100644 index dc5d84ccb7b24e0ef47cd9a2760cae61c886d391..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/euctwprober.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/gb2312freq.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/gb2312freq.cpython-39.pyc deleted file mode 100644 index 92164a591727d598f99c59a54c72a5a654e51f86..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/gb2312freq.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/gb2312prober.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/gb2312prober.cpython-39.pyc deleted file mode 100644 index 7d135e695847c53d86c4ce946c97a384a90bf099..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/gb2312prober.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/hebrewprober.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/hebrewprober.cpython-39.pyc deleted file mode 100644 index c00756b966f3442fc6cbcc55997d0e580a79207a..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/hebrewprober.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/jisfreq.cpython-39.pyc 
b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/jisfreq.cpython-39.pyc deleted file mode 100644 index dd5e0198ea7365906b2425f44edb8914e42787d7..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/jisfreq.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/jpcntx.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/jpcntx.cpython-39.pyc deleted file mode 100644 index 2e0ac2242fba8ba4be81cde568bf387ed732e358..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/jpcntx.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/langbulgarianmodel.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/langbulgarianmodel.cpython-39.pyc deleted file mode 100644 index a2213cf6eeddafb3366e0e6a8ce9948521314d5c..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/langbulgarianmodel.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/langgreekmodel.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/langgreekmodel.cpython-39.pyc deleted file mode 100644 index 967eb30becee3e0eeb1806594a6dd655a2c17abc..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/langgreekmodel.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/langhebrewmodel.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/langhebrewmodel.cpython-39.pyc deleted file mode 100644 index e01cd062929dc27b8282f4d35adfaabab5b2ce11..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/langhebrewmodel.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/langhungarianmodel.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/langhungarianmodel.cpython-39.pyc deleted file mode 100644 index df5dc55783115bfae52d945f12c3c1c6fc0d8cf2..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/langhungarianmodel.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/langrussianmodel.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/langrussianmodel.cpython-39.pyc deleted file mode 100644 index 035e6b2ac946b482d57c21a4f5fdfd19e195c0d4..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/langrussianmodel.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/langthaimodel.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/langthaimodel.cpython-39.pyc deleted file mode 100644 index 4444ac5e985a78c715efb7a6dbfc0ab5b9cd0d1d..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/langthaimodel.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/langturkishmodel.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/langturkishmodel.cpython-39.pyc deleted file mode 100644 index da8c69bb71b5a4fb53608a6d63f0211c238305a7..0000000000000000000000000000000000000000 Binary files 
a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/langturkishmodel.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/latin1prober.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/latin1prober.cpython-39.pyc deleted file mode 100644 index 22da5e306b250a224197ddaa93606c63542779c1..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/latin1prober.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/mbcharsetprober.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/mbcharsetprober.cpython-39.pyc deleted file mode 100644 index 597ee0c4737ea0e4395d936f21e9b6e7aacab1d2..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/mbcharsetprober.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/mbcsgroupprober.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/mbcsgroupprober.cpython-39.pyc deleted file mode 100644 index 5367772f25a5634e054459e30d5dd3c8e8855245..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/mbcsgroupprober.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/mbcssm.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/mbcssm.cpython-39.pyc deleted file mode 100644 index 83f7161fe18171b5c7febcaae21f7700bb5ad73f..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/mbcssm.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/sbcharsetprober.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/sbcharsetprober.cpython-39.pyc deleted file mode 100644 index f04b346441caa82541f2812c58325aaae633f0c0..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/sbcharsetprober.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/sbcsgroupprober.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/sbcsgroupprober.cpython-39.pyc deleted file mode 100644 index 85e1d972af9dfa0554f1ff4ed7d33db1f6f48e74..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/sbcsgroupprober.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/sjisprober.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/sjisprober.cpython-39.pyc deleted file mode 100644 index 053a6cbc6f5de5073f9647c4821a5c41552f6cb1..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/sjisprober.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/universaldetector.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/universaldetector.cpython-39.pyc deleted file mode 100644 index ba21409ee701f0adbd4ac2a2e39e517ace8ba781..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/universaldetector.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/utf8prober.cpython-39.pyc 
b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/utf8prober.cpython-39.pyc deleted file mode 100644 index b5f8a274e6f374bf861c728afa229521bc3ef729..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/utf8prober.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/version.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/version.cpython-39.pyc deleted file mode 100644 index 6b67d25af1395324763805ede852d3a9ce136ec1..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/__pycache__/version.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/chardet/big5freq.py b/env/Lib/site-packages/pip/_vendor/chardet/big5freq.py index 38f32517aa8f6cf5970f7ceddd1a415289184c3e..87d9f972edde20d1f8e391b8010703242a8de977 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/big5freq.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/big5freq.py @@ -42,9 +42,9 @@ BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75 -#Char to FreqOrder table +# Char to FreqOrder table BIG5_TABLE_SIZE = 5376 - +# fmt: off BIG5_CHAR_TO_FREQ_ORDER = ( 1,1801,1506, 255,1431, 198, 9, 82, 6,5008, 177, 202,3681,1256,2821, 110, # 16 3814, 33,3274, 261, 76, 44,2114, 16,2946,2187,1176, 659,3971, 26,3451,2653, # 32 @@ -383,4 +383,4 @@ BIG5_CHAR_TO_FREQ_ORDER = ( 890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941, # 5360 2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798, # 5376 ) - +# fmt: on diff --git a/env/Lib/site-packages/pip/_vendor/chardet/big5prober.py b/env/Lib/site-packages/pip/_vendor/chardet/big5prober.py index 98f9970122088c14a5830e091ca8a12fc8e4c563..ef09c60e327a0122e32f95f2f10a826a033c573c 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/big5prober.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/big5prober.py @@ -25,23 +25,23 @@ # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -from .mbcharsetprober import MultiByteCharSetProber -from .codingstatemachine import CodingStateMachine from .chardistribution import Big5DistributionAnalysis +from .codingstatemachine import CodingStateMachine +from .mbcharsetprober import MultiByteCharSetProber from .mbcssm import BIG5_SM_MODEL class Big5Prober(MultiByteCharSetProber): - def __init__(self): - super(Big5Prober, self).__init__() + def __init__(self) -> None: + super().__init__() self.coding_sm = CodingStateMachine(BIG5_SM_MODEL) self.distribution_analyzer = Big5DistributionAnalysis() self.reset() @property - def charset_name(self): + def charset_name(self) -> str: return "Big5" @property - def language(self): + def language(self) -> str: return "Chinese" diff --git a/env/Lib/site-packages/pip/_vendor/chardet/chardistribution.py b/env/Lib/site-packages/pip/_vendor/chardet/chardistribution.py index c0395f4a45aaa5c4ba1824a81d8ef8f69b46dc60..176cb996408e6681a88722783919efc0e9dafb29 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/chardistribution.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/chardistribution.py @@ -25,40 +25,58 @@ # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -from .euctwfreq import (EUCTW_CHAR_TO_FREQ_ORDER, EUCTW_TABLE_SIZE, - EUCTW_TYPICAL_DISTRIBUTION_RATIO) -from .euckrfreq import (EUCKR_CHAR_TO_FREQ_ORDER, EUCKR_TABLE_SIZE, - EUCKR_TYPICAL_DISTRIBUTION_RATIO) -from .gb2312freq import (GB2312_CHAR_TO_FREQ_ORDER, 
GB2312_TABLE_SIZE, - GB2312_TYPICAL_DISTRIBUTION_RATIO) -from .big5freq import (BIG5_CHAR_TO_FREQ_ORDER, BIG5_TABLE_SIZE, - BIG5_TYPICAL_DISTRIBUTION_RATIO) -from .jisfreq import (JIS_CHAR_TO_FREQ_ORDER, JIS_TABLE_SIZE, - JIS_TYPICAL_DISTRIBUTION_RATIO) - - -class CharDistributionAnalysis(object): +from typing import Tuple, Union + +from .big5freq import ( + BIG5_CHAR_TO_FREQ_ORDER, + BIG5_TABLE_SIZE, + BIG5_TYPICAL_DISTRIBUTION_RATIO, +) +from .euckrfreq import ( + EUCKR_CHAR_TO_FREQ_ORDER, + EUCKR_TABLE_SIZE, + EUCKR_TYPICAL_DISTRIBUTION_RATIO, +) +from .euctwfreq import ( + EUCTW_CHAR_TO_FREQ_ORDER, + EUCTW_TABLE_SIZE, + EUCTW_TYPICAL_DISTRIBUTION_RATIO, +) +from .gb2312freq import ( + GB2312_CHAR_TO_FREQ_ORDER, + GB2312_TABLE_SIZE, + GB2312_TYPICAL_DISTRIBUTION_RATIO, +) +from .jisfreq import ( + JIS_CHAR_TO_FREQ_ORDER, + JIS_TABLE_SIZE, + JIS_TYPICAL_DISTRIBUTION_RATIO, +) +from .johabfreq import JOHAB_TO_EUCKR_ORDER_TABLE + + +class CharDistributionAnalysis: ENOUGH_DATA_THRESHOLD = 1024 SURE_YES = 0.99 SURE_NO = 0.01 MINIMUM_DATA_THRESHOLD = 3 - def __init__(self): + def __init__(self) -> None: # Mapping table to get frequency order from char order (get from # GetOrder()) - self._char_to_freq_order = None - self._table_size = None # Size of above table + self._char_to_freq_order: Tuple[int, ...] = tuple() + self._table_size = 0 # Size of above table # This is a constant value which varies from language to language, # used in calculating confidence. See # http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html # for further detail. - self.typical_distribution_ratio = None - self._done = None - self._total_chars = None - self._freq_chars = None + self.typical_distribution_ratio = 0.0 + self._done = False + self._total_chars = 0 + self._freq_chars = 0 self.reset() - def reset(self): + def reset(self) -> None: """reset analyser, clear any state""" # If this flag is set to True, detection is done and conclusion has # been made @@ -67,7 +85,7 @@ class CharDistributionAnalysis(object): # The number of characters whose frequency order is less than 512 self._freq_chars = 0 - def feed(self, char, char_len): + def feed(self, char: Union[bytes, bytearray], char_len: int) -> None: """feed a character with known length""" if char_len == 2: # we only care about 2-bytes character in our distribution analysis @@ -81,7 +99,7 @@ class CharDistributionAnalysis(object): if 512 > self._char_to_freq_order[order]: self._freq_chars += 1 - def get_confidence(self): + def get_confidence(self) -> float: """return confidence based on existing data""" # if we didn't receive any character in our consideration range, # return negative answer @@ -89,20 +107,21 @@ class CharDistributionAnalysis(object): return self.SURE_NO if self._total_chars != self._freq_chars: - r = (self._freq_chars / ((self._total_chars - self._freq_chars) - * self.typical_distribution_ratio)) + r = self._freq_chars / ( + (self._total_chars - self._freq_chars) * self.typical_distribution_ratio + ) if r < self.SURE_YES: return r # normalize confidence (we don't want to be 100% sure) return self.SURE_YES - def got_enough_data(self): + def got_enough_data(self) -> bool: # It is not necessary to receive all data to draw conclusion. 
# For charset detection, certain amount of data is enough return self._total_chars > self.ENOUGH_DATA_THRESHOLD - def get_order(self, byte_str): + def get_order(self, _: Union[bytes, bytearray]) -> int: # We do not handle characters based on the original encoding string, # but convert this encoding string to a number, here called order. # This allows multiple encodings of a language to share one frequency @@ -111,13 +130,13 @@ class CharDistributionAnalysis(object): class EUCTWDistributionAnalysis(CharDistributionAnalysis): - def __init__(self): - super(EUCTWDistributionAnalysis, self).__init__() + def __init__(self) -> None: + super().__init__() self._char_to_freq_order = EUCTW_CHAR_TO_FREQ_ORDER self._table_size = EUCTW_TABLE_SIZE self.typical_distribution_ratio = EUCTW_TYPICAL_DISTRIBUTION_RATIO - def get_order(self, byte_str): + def get_order(self, byte_str: Union[bytes, bytearray]) -> int: # for euc-TW encoding, we are interested # first byte range: 0xc4 -- 0xfe # second byte range: 0xa1 -- 0xfe @@ -125,18 +144,17 @@ class EUCTWDistributionAnalysis(CharDistributionAnalysis): first_char = byte_str[0] if first_char >= 0xC4: return 94 * (first_char - 0xC4) + byte_str[1] - 0xA1 - else: - return -1 + return -1 class EUCKRDistributionAnalysis(CharDistributionAnalysis): - def __init__(self): - super(EUCKRDistributionAnalysis, self).__init__() + def __init__(self) -> None: + super().__init__() self._char_to_freq_order = EUCKR_CHAR_TO_FREQ_ORDER self._table_size = EUCKR_TABLE_SIZE self.typical_distribution_ratio = EUCKR_TYPICAL_DISTRIBUTION_RATIO - def get_order(self, byte_str): + def get_order(self, byte_str: Union[bytes, bytearray]) -> int: # for euc-KR encoding, we are interested # first byte range: 0xb0 -- 0xfe # second byte range: 0xa1 -- 0xfe @@ -144,18 +162,32 @@ class EUCKRDistributionAnalysis(CharDistributionAnalysis): first_char = byte_str[0] if first_char >= 0xB0: return 94 * (first_char - 0xB0) + byte_str[1] - 0xA1 - else: - return -1 + return -1 + + +class JOHABDistributionAnalysis(CharDistributionAnalysis): + def __init__(self) -> None: + super().__init__() + self._char_to_freq_order = EUCKR_CHAR_TO_FREQ_ORDER + self._table_size = EUCKR_TABLE_SIZE + self.typical_distribution_ratio = EUCKR_TYPICAL_DISTRIBUTION_RATIO + + def get_order(self, byte_str: Union[bytes, bytearray]) -> int: + first_char = byte_str[0] + if 0x88 <= first_char < 0xD4: + code = first_char * 256 + byte_str[1] + return JOHAB_TO_EUCKR_ORDER_TABLE.get(code, -1) + return -1 class GB2312DistributionAnalysis(CharDistributionAnalysis): - def __init__(self): - super(GB2312DistributionAnalysis, self).__init__() + def __init__(self) -> None: + super().__init__() self._char_to_freq_order = GB2312_CHAR_TO_FREQ_ORDER self._table_size = GB2312_TABLE_SIZE self.typical_distribution_ratio = GB2312_TYPICAL_DISTRIBUTION_RATIO - def get_order(self, byte_str): + def get_order(self, byte_str: Union[bytes, bytearray]) -> int: # for GB2312 encoding, we are interested # first byte range: 0xb0 -- 0xfe # second byte range: 0xa1 -- 0xfe @@ -163,18 +195,17 @@ class GB2312DistributionAnalysis(CharDistributionAnalysis): first_char, second_char = byte_str[0], byte_str[1] if (first_char >= 0xB0) and (second_char >= 0xA1): return 94 * (first_char - 0xB0) + second_char - 0xA1 - else: - return -1 + return -1 class Big5DistributionAnalysis(CharDistributionAnalysis): - def __init__(self): - super(Big5DistributionAnalysis, self).__init__() + def __init__(self) -> None: + super().__init__() self._char_to_freq_order = BIG5_CHAR_TO_FREQ_ORDER 
self._table_size = BIG5_TABLE_SIZE self.typical_distribution_ratio = BIG5_TYPICAL_DISTRIBUTION_RATIO - def get_order(self, byte_str): + def get_order(self, byte_str: Union[bytes, bytearray]) -> int: # for big5 encoding, we are interested # first byte range: 0xa4 -- 0xfe # second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe @@ -183,28 +214,26 @@ class Big5DistributionAnalysis(CharDistributionAnalysis): if first_char >= 0xA4: if second_char >= 0xA1: return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63 - else: - return 157 * (first_char - 0xA4) + second_char - 0x40 - else: - return -1 + return 157 * (first_char - 0xA4) + second_char - 0x40 + return -1 class SJISDistributionAnalysis(CharDistributionAnalysis): - def __init__(self): - super(SJISDistributionAnalysis, self).__init__() + def __init__(self) -> None: + super().__init__() self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER self._table_size = JIS_TABLE_SIZE self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO - def get_order(self, byte_str): + def get_order(self, byte_str: Union[bytes, bytearray]) -> int: # for sjis encoding, we are interested # first byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe # second byte range: 0x40 -- 0x7e, 0x81 -- oxfe # no validation needed here. State machine has done that first_char, second_char = byte_str[0], byte_str[1] - if (first_char >= 0x81) and (first_char <= 0x9F): + if 0x81 <= first_char <= 0x9F: order = 188 * (first_char - 0x81) - elif (first_char >= 0xE0) and (first_char <= 0xEF): + elif 0xE0 <= first_char <= 0xEF: order = 188 * (first_char - 0xE0 + 31) else: return -1 @@ -215,19 +244,18 @@ class SJISDistributionAnalysis(CharDistributionAnalysis): class EUCJPDistributionAnalysis(CharDistributionAnalysis): - def __init__(self): - super(EUCJPDistributionAnalysis, self).__init__() + def __init__(self) -> None: + super().__init__() self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER self._table_size = JIS_TABLE_SIZE self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO - def get_order(self, byte_str): + def get_order(self, byte_str: Union[bytes, bytearray]) -> int: # for euc-JP encoding, we are interested # first byte range: 0xa0 -- 0xfe # second byte range: 0xa1 -- 0xfe # no validation needed here. 
State machine has done that
char = byte_str[0]
if char >= 0xA0:
- return 94 * (char - 0xA1) + byte_str[1] - 0xa1
- else:
- return -1
+ return 94 * (char - 0xA1) + byte_str[1] - 0xA1
+ return -1
diff --git a/env/Lib/site-packages/pip/_vendor/chardet/charsetgroupprober.py b/env/Lib/site-packages/pip/_vendor/chardet/charsetgroupprober.py
index 5812cef0b5924db9af2da77f0abe4e63decee4cf..6def56b4a75f67000ed8181ae2d2c40eefb645fb 100644
--- a/env/Lib/site-packages/pip/_vendor/chardet/charsetgroupprober.py
+++ b/env/Lib/site-packages/pip/_vendor/chardet/charsetgroupprober.py
@@ -25,29 +25,30 @@
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
-from .enums import ProbingState
+from typing import List, Optional, Union
+
from .charsetprober import CharSetProber
+from .enums import LanguageFilter, ProbingState
class CharSetGroupProber(CharSetProber):
- def __init__(self, lang_filter=None):
- super(CharSetGroupProber, self).__init__(lang_filter=lang_filter)
+ def __init__(self, lang_filter: LanguageFilter = LanguageFilter.NONE) -> None:
+ super().__init__(lang_filter=lang_filter)
self._active_num = 0
- self.probers = []
- self._best_guess_prober = None
+ self.probers: List[CharSetProber] = []
+ self._best_guess_prober: Optional[CharSetProber] = None
- def reset(self):
- super(CharSetGroupProber, self).reset()
+ def reset(self) -> None:
+ super().reset()
self._active_num = 0
for prober in self.probers:
- if prober:
- prober.reset()
- prober.active = True
- self._active_num += 1
+ prober.reset()
+ prober.active = True
+ self._active_num += 1
self._best_guess_prober = None
@property
- def charset_name(self):
+ def charset_name(self) -> Optional[str]:
if not self._best_guess_prober:
self.get_confidence()
if not self._best_guess_prober:
@@ -55,17 +56,15 @@ class CharSetGroupProber(CharSetProber):
return self._best_guess_prober.charset_name
@property
- def language(self):
+ def language(self) -> Optional[str]:
if not self._best_guess_prober:
self.get_confidence()
if not self._best_guess_prober:
return None
return self._best_guess_prober.language
- def feed(self, byte_str):
+ def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
for prober in self.probers:
- if not prober:
- continue
if not prober.active:
continue
state = prober.feed(byte_str)
@@ -75,7 +74,7 @@
self._best_guess_prober = prober
self._state = ProbingState.FOUND_IT
return self.state
- elif state == ProbingState.NOT_ME:
+ if state == ProbingState.NOT_ME:
prober.active = False
self._active_num -= 1
if self._active_num <= 0:
@@ -83,22 +82,22 @@
return self.state
return self.state
- def get_confidence(self):
+ def get_confidence(self) -> float:
state = self.state
if state == ProbingState.FOUND_IT:
return 0.99
- elif state == ProbingState.NOT_ME:
+ if state == ProbingState.NOT_ME:
return 0.01
best_conf = 0.0
self._best_guess_prober = None
for prober in self.probers:
- if not prober:
- continue
if not prober.active:
- self.logger.debug('%s not active', prober.charset_name)
+ self.logger.debug("%s not active", prober.charset_name)
continue
conf = prober.get_confidence()
- self.logger.debug('%s %s confidence = %s', prober.charset_name, prober.language, conf)
+ self.logger.debug(
+ "%s %s confidence = %s", prober.charset_name, prober.language, conf
+ )
if best_conf < conf:
best_conf = conf
self._best_guess_prober = prober
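To make the arithmetic in chardistribution.py and charsetgroupprober.py above concrete, a small worked example with illustrative values: EUC-KR encodes the syllable "가" as the byte pair 0xB0 0xA1, which the EUC-KR analyser maps to frequency-order cell 0, and a distribution analyser's confidence is the count of frequent characters divided by the remaining count scaled by the language's typical distribution ratio; CharSetGroupProber then simply keeps whichever sub-prober reports the highest value.

    # Order arithmetic from EUCKRDistributionAnalysis.get_order (illustrative):
    first, second = 0xB0, 0xA1                  # b"\xb0\xa1" is "가" in EUC-KR
    order = 94 * (first - 0xB0) + (second - 0xA1)
    assert order == 0                           # first cell of the 94-per-row grid

    # Confidence formula from CharDistributionAnalysis.get_confidence (made-up counts):
    freq_chars, total_chars, ratio = 60, 100, 6.0
    r = freq_chars / ((total_chars - freq_chars) * ratio)
    print(r)                                    # 0.25; values >= 0.99 are capped at SURE_YES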
b/env/Lib/site-packages/pip/_vendor/chardet/charsetprober.py index eac4e5986578636ad414648e6015e8b7e9f10432..a103ca11356606402c03b320a4fcdb8635051623 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/charsetprober.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/charsetprober.py @@ -28,54 +28,62 @@ import logging import re +from typing import Optional, Union -from .enums import ProbingState +from .enums import LanguageFilter, ProbingState +INTERNATIONAL_WORDS_PATTERN = re.compile( + b"[a-zA-Z]*[\x80-\xFF]+[a-zA-Z]*[^a-zA-Z\x80-\xFF]?" +) -class CharSetProber(object): + +class CharSetProber: SHORTCUT_THRESHOLD = 0.95 - def __init__(self, lang_filter=None): - self._state = None + def __init__(self, lang_filter: LanguageFilter = LanguageFilter.NONE) -> None: + self._state = ProbingState.DETECTING + self.active = True self.lang_filter = lang_filter self.logger = logging.getLogger(__name__) - def reset(self): + def reset(self) -> None: self._state = ProbingState.DETECTING @property - def charset_name(self): + def charset_name(self) -> Optional[str]: return None - def feed(self, buf): - pass + @property + def language(self) -> Optional[str]: + raise NotImplementedError + + def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState: + raise NotImplementedError @property - def state(self): + def state(self) -> ProbingState: return self._state - def get_confidence(self): + def get_confidence(self) -> float: return 0.0 @staticmethod - def filter_high_byte_only(buf): - buf = re.sub(b'([\x00-\x7F])+', b' ', buf) + def filter_high_byte_only(buf: Union[bytes, bytearray]) -> bytes: + buf = re.sub(b"([\x00-\x7F])+", b" ", buf) return buf @staticmethod - def filter_international_words(buf): + def filter_international_words(buf: Union[bytes, bytearray]) -> bytearray: """ We define three types of bytes: alphabet: english alphabets [a-zA-Z] international: international characters [\x80-\xFF] marker: everything else [^a-zA-Z\x80-\xFF] - The input buffer can be thought to contain a series of words delimited by markers. This function works to filter all words that contain at least one international character. All contiguous sequences of markers are replaced by a single space ascii character. - This filter applies to all scripts which do not use English characters. """ filtered = bytearray() @@ -83,8 +91,7 @@ class CharSetProber(object): # This regex expression filters out only words that have at-least one # international character. The word may include one marker character at # the end. - words = re.findall(b'[a-zA-Z]*[\x80-\xFF]+[a-zA-Z]*[^a-zA-Z\x80-\xFF]?', - buf) + words = INTERNATIONAL_WORDS_PATTERN.findall(buf) for word in words: filtered.extend(word[:-1]) @@ -94,20 +101,17 @@ class CharSetProber(object): # similarly across all languages and may thus have similar # frequencies). last_char = word[-1:] - if not last_char.isalpha() and last_char < b'\x80': - last_char = b' ' + if not last_char.isalpha() and last_char < b"\x80": + last_char = b" " filtered.extend(last_char) return filtered @staticmethod - def filter_with_english_letters(buf): + def remove_xml_tags(buf: Union[bytes, bytearray]) -> bytes: """ Returns a copy of ``buf`` that retains only the sequences of English alphabet and high byte characters that are not between <> characters. - Also retains English alphabet and high byte characters immediately - before occurrences of >. - This filter can be applied to all scripts which contain both English characters and extended ASCII characters, but is currently only used by ``Latin1Prober``. 
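
[Reviewer note] The static filter helpers in the hunk above are easiest to follow against a concrete buffer. A minimal sketch (not part of the patch), assuming the vendored copy is importable; with standalone chardet, drop the `pip._vendor.` prefix:

    from pip._vendor.chardet.charsetprober import CharSetProber

    buf = b"abc caf\xc3\xa9 123"
    # Every run of pure-ASCII bytes (0x00-0x7F) collapses to a single
    # space, so only the high-byte sequences survive.
    print(CharSetProber.filter_high_byte_only(buf))        # b' \xc3\xa9 '
    # Only "words" containing at least one byte >= 0x80 are kept; the one
    # allowed trailing marker byte is rewritten as a space.
    print(CharSetProber.filter_international_words(buf))   # bytearray(b'caf\xc3\xa9 ')
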
@@ -115,26 +119,24 @@ class CharSetProber(object): filtered = bytearray() in_tag = False prev = 0 + buf = memoryview(buf).cast("c") - for curr in range(len(buf)): - # Slice here to get bytes instead of an int with Python 3 - buf_char = buf[curr:curr + 1] - # Check if we're coming out of or entering an HTML tag - if buf_char == b'>': - in_tag = False - elif buf_char == b'<': - in_tag = True + for curr, buf_char in enumerate(buf): + # Check if we're coming out of or entering an XML tag - # If current character is not extended-ASCII and not alphabetic... - if buf_char < b'\x80' and not buf_char.isalpha(): - # ...and we're not in a tag + # https://github.com/python/typeshed/issues/8182 + if buf_char == b">": # type: ignore[comparison-overlap] + prev = curr + 1 + in_tag = False + # https://github.com/python/typeshed/issues/8182 + elif buf_char == b"<": # type: ignore[comparison-overlap] if curr > prev and not in_tag: # Keep everything after last non-extended-ASCII, # non-alphabetic character filtered.extend(buf[prev:curr]) # Output a space to delimit stretch we kept - filtered.extend(b' ') - prev = curr + 1 + filtered.extend(b" ") + in_tag = True # If we're not in a tag... if not in_tag: diff --git a/env/Lib/site-packages/pip/_vendor/chardet/cli/__init__.py b/env/Lib/site-packages/pip/_vendor/chardet/cli/__init__.py index 8b137891791fe96927ad78e64b0aad7bded08bdc..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/cli/__init__.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/cli/__init__.py @@ -1 +0,0 @@ - diff --git a/env/Lib/site-packages/pip/_vendor/chardet/cli/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/cli/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 4dc854ddc02c16c1da3beebcc0db9d85c3155dac..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/cli/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/chardet/cli/__pycache__/chardetect.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/cli/__pycache__/chardetect.cpython-39.pyc deleted file mode 100644 index 51cff09663700751e744603566aab3f8423a8f17..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/cli/__pycache__/chardetect.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/chardet/cli/chardetect.py b/env/Lib/site-packages/pip/_vendor/chardet/cli/chardetect.py index 6d6f93aabd44902caf086998226aa48fd3bbe221..43f6e144f677a113b5362dcbdfb75db4f41c2b2f 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/cli/chardetect.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/cli/chardetect.py @@ -12,17 +12,21 @@ If no paths are provided, it takes its input from stdin. """ -from __future__ import absolute_import, print_function, unicode_literals import argparse import sys +from typing import Iterable, List, Optional -from pip._vendor.chardet import __version__ -from pip._vendor.chardet.compat import PY2 -from pip._vendor.chardet.universaldetector import UniversalDetector +from .. import __version__ +from ..universaldetector import UniversalDetector -def description_of(lines, name='stdin'): +def description_of( + lines: Iterable[bytes], + name: str = "stdin", + minimal: bool = False, + should_rename_legacy: bool = False, +) -> Optional[str]: """ Return a string describing the probable encoding of a file or list of strings. 
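
[Reviewer note] For reference, a minimal usage sketch of the new `description_of` signature introduced above (not part of the patch); the encoding and confidence in the comments are placeholders, not guaranteed output:

    from pip._vendor.chardet.cli.chardetect import description_of

    lines = [b"this is a plain ascii line\n"]
    print(description_of(lines, name="sample"))
    # -> "sample: <encoding> with confidence <confidence>"
    print(description_of(lines, name="sample", minimal=True))
    # -> just the encoding name (or None when detection fails)
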
@@ -31,8 +35,11 @@ def description_of(lines, name='stdin'): :type lines: Iterable of bytes :param name: Name of file or collection of lines :type name: str + :param should_rename_legacy: Should we rename legacy encodings to + their more modern equivalents? + :type should_rename_legacy: ``bool`` """ - u = UniversalDetector() + u = UniversalDetector(should_rename_legacy=should_rename_legacy) for line in lines: line = bytearray(line) u.feed(line) @@ -41,16 +48,14 @@ def description_of(lines, name='stdin'): break u.close() result = u.result - if PY2: - name = name.decode(sys.getfilesystemencoding(), 'ignore') - if result['encoding']: - return '{}: {} with confidence {}'.format(name, result['encoding'], - result['confidence']) - else: - return '{}: no result'.format(name) + if minimal: + return result["encoding"] + if result["encoding"]: + return f'{name}: {result["encoding"]} with confidence {result["confidence"]}' + return f"{name}: no result" -def main(argv=None): +def main(argv: Optional[List[str]] = None) -> None: """ Handles command line arguments and gets things started. @@ -60,25 +65,48 @@ def main(argv=None): """ # Get command line arguments parser = argparse.ArgumentParser( - description="Takes one or more file paths and reports their detected \ - encodings") - parser.add_argument('input', - help='File whose encoding we would like to determine. \ - (default: stdin)', - type=argparse.FileType('rb'), nargs='*', - default=[sys.stdin if PY2 else sys.stdin.buffer]) - parser.add_argument('--version', action='version', - version='%(prog)s {}'.format(__version__)) + description=( + "Takes one or more file paths and reports their detected encodings" + ) + ) + parser.add_argument( + "input", + help="File whose encoding we would like to determine. (default: stdin)", + type=argparse.FileType("rb"), + nargs="*", + default=[sys.stdin.buffer], + ) + parser.add_argument( + "--minimal", + help="Print only the encoding to standard output", + action="store_true", + ) + parser.add_argument( + "-l", + "--legacy", + help="Rename legacy encodings to more modern ones.", + action="store_true", + ) + parser.add_argument( + "--version", action="version", version=f"%(prog)s {__version__}" + ) args = parser.parse_args(argv) for f in args.input: if f.isatty(): - print("You are running chardetect interactively. Press " + - "CTRL-D twice at the start of a blank line to signal the " + - "end of your input. If you want help, run chardetect " + - "--help\n", file=sys.stderr) - print(description_of(f, f.name)) - - -if __name__ == '__main__': + print( + "You are running chardetect interactively. Press " + "CTRL-D twice at the start of a blank line to signal the " + "end of your input. 
If you want help, run chardetect " + "--help\n", + file=sys.stderr, + ) + print( + description_of( + f, f.name, minimal=args.minimal, should_rename_legacy=args.legacy + ) + ) + + +if __name__ == "__main__": main() diff --git a/env/Lib/site-packages/pip/_vendor/chardet/codingstatemachine.py b/env/Lib/site-packages/pip/_vendor/chardet/codingstatemachine.py index 68fba44f14366c448f13db3cf9cf1665af2e498c..8ed4a8773b8404c2705aa8728e5fd692362ba168 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/codingstatemachine.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/codingstatemachine.py @@ -27,10 +27,11 @@ import logging +from .codingstatemachinedict import CodingStateMachineDict from .enums import MachineState -class CodingStateMachine(object): +class CodingStateMachine: """ A state machine to verify a byte sequence for a particular encoding. For each byte the detector receives, it will feed that byte to every active @@ -52,37 +53,38 @@ class CodingStateMachine(object): negative answer for this encoding. Detector will exclude this encoding from consideration from here on. """ - def __init__(self, sm): + + def __init__(self, sm: CodingStateMachineDict) -> None: self._model = sm self._curr_byte_pos = 0 self._curr_char_len = 0 - self._curr_state = None + self._curr_state = MachineState.START + self.active = True self.logger = logging.getLogger(__name__) self.reset() - def reset(self): + def reset(self) -> None: self._curr_state = MachineState.START - def next_state(self, c): + def next_state(self, c: int) -> int: # for each byte we get its class # if it is first byte, we also get byte length - byte_class = self._model['class_table'][c] + byte_class = self._model["class_table"][c] if self._curr_state == MachineState.START: self._curr_byte_pos = 0 - self._curr_char_len = self._model['char_len_table'][byte_class] + self._curr_char_len = self._model["char_len_table"][byte_class] # from byte's class and state_table, we get its next state - curr_state = (self._curr_state * self._model['class_factor'] - + byte_class) - self._curr_state = self._model['state_table'][curr_state] + curr_state = self._curr_state * self._model["class_factor"] + byte_class + self._curr_state = self._model["state_table"][curr_state] self._curr_byte_pos += 1 return self._curr_state - def get_current_charlen(self): + def get_current_charlen(self) -> int: return self._curr_char_len - def get_coding_state_machine(self): - return self._model['name'] + def get_coding_state_machine(self) -> str: + return self._model["name"] @property - def language(self): - return self._model['language'] + def language(self) -> str: + return self._model["language"] diff --git a/env/Lib/site-packages/pip/_vendor/chardet/compat.py b/env/Lib/site-packages/pip/_vendor/chardet/compat.py deleted file mode 100644 index 8941572b3e6a2a2267659ed74e25099c37aae90b..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/chardet/compat.py +++ /dev/null @@ -1,36 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# Contributor(s): -# Dan Blanchard -# Ian Cordasco -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. 
-# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -import sys - - -if sys.version_info < (3, 0): - PY2 = True - PY3 = False - string_types = (str, unicode) - text_type = unicode - iteritems = dict.iteritems -else: - PY2 = False - PY3 = True - string_types = (bytes, str) - text_type = str - iteritems = dict.items diff --git a/env/Lib/site-packages/pip/_vendor/chardet/cp949prober.py b/env/Lib/site-packages/pip/_vendor/chardet/cp949prober.py index efd793abca4bf496001a4e46a67557e5a6f16bba..fa7307ed8985ad7e318660da0066440f890d1624 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/cp949prober.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/cp949prober.py @@ -32,8 +32,8 @@ from .mbcssm import CP949_SM_MODEL class CP949Prober(MultiByteCharSetProber): - def __init__(self): - super(CP949Prober, self).__init__() + def __init__(self) -> None: + super().__init__() self.coding_sm = CodingStateMachine(CP949_SM_MODEL) # NOTE: CP949 is a superset of EUC-KR, so the distribution should be # not different. @@ -41,9 +41,9 @@ class CP949Prober(MultiByteCharSetProber): self.reset() @property - def charset_name(self): + def charset_name(self) -> str: return "CP949" @property - def language(self): + def language(self) -> str: return "Korean" diff --git a/env/Lib/site-packages/pip/_vendor/chardet/enums.py b/env/Lib/site-packages/pip/_vendor/chardet/enums.py index 04512072251c429e63ed110cdbafaf4b3cca3412..5e3e198233698f2b007489dd299cecb87d971067 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/enums.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/enums.py @@ -4,21 +4,26 @@ All of the Enums that are used throughout the chardet package. :author: Dan Blanchard (dan.blanchard@gmail.com) """ +from enum import Enum, Flag -class InputState(object): + +class InputState: """ This enum represents the different states a universal detector can be in. """ + PURE_ASCII = 0 ESC_ASCII = 1 HIGH_BYTE = 2 -class LanguageFilter(object): +class LanguageFilter(Flag): """ This enum represents the different language filters we can apply to a ``UniversalDetector``. """ + + NONE = 0x00 CHINESE_SIMPLIFIED = 0x01 CHINESE_TRADITIONAL = 0x02 JAPANESE = 0x04 @@ -29,46 +34,50 @@ class LanguageFilter(object): CJK = CHINESE | JAPANESE | KOREAN -class ProbingState(object): +class ProbingState(Enum): """ This enum represents the different states a prober can be in. """ + DETECTING = 0 FOUND_IT = 1 NOT_ME = 2 -class MachineState(object): +class MachineState: """ This enum represents the different states a state machine can be in. """ + START = 0 ERROR = 1 ITS_ME = 2 -class SequenceLikelihood(object): +class SequenceLikelihood: """ This enum represents the likelihood of a character following the previous one. 
""" + NEGATIVE = 0 UNLIKELY = 1 LIKELY = 2 POSITIVE = 3 @classmethod - def get_num_categories(cls): + def get_num_categories(cls) -> int: """:returns: The number of likelihood categories in the enum.""" return 4 -class CharacterCategory(object): +class CharacterCategory: """ This enum represents the different categories language models for ``SingleByteCharsetProber`` put characters into. Anything less than CONTROL is considered a letter. """ + UNDEFINED = 255 LINE_BREAK = 254 SYMBOL = 253 diff --git a/env/Lib/site-packages/pip/_vendor/chardet/escprober.py b/env/Lib/site-packages/pip/_vendor/chardet/escprober.py index c70493f2b131b32378612044f30173eabbfbc3f4..fd713830d36cabc6a0fb4ab4e8cf426a84decdc6 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/escprober.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/escprober.py @@ -25,11 +25,17 @@ # 02110-1301 USA ######################### END LICENSE BLOCK ######################### +from typing import Optional, Union + from .charsetprober import CharSetProber from .codingstatemachine import CodingStateMachine -from .enums import LanguageFilter, ProbingState, MachineState -from .escsm import (HZ_SM_MODEL, ISO2022CN_SM_MODEL, ISO2022JP_SM_MODEL, - ISO2022KR_SM_MODEL) +from .enums import LanguageFilter, MachineState, ProbingState +from .escsm import ( + HZ_SM_MODEL, + ISO2022CN_SM_MODEL, + ISO2022JP_SM_MODEL, + ISO2022KR_SM_MODEL, +) class EscCharSetProber(CharSetProber): @@ -39,8 +45,8 @@ class EscCharSetProber(CharSetProber): identify these encodings. """ - def __init__(self, lang_filter=None): - super(EscCharSetProber, self).__init__(lang_filter=lang_filter) + def __init__(self, lang_filter: LanguageFilter = LanguageFilter.NONE) -> None: + super().__init__(lang_filter=lang_filter) self.coding_sm = [] if self.lang_filter & LanguageFilter.CHINESE_SIMPLIFIED: self.coding_sm.append(CodingStateMachine(HZ_SM_MODEL)) @@ -49,17 +55,15 @@ class EscCharSetProber(CharSetProber): self.coding_sm.append(CodingStateMachine(ISO2022JP_SM_MODEL)) if self.lang_filter & LanguageFilter.KOREAN: self.coding_sm.append(CodingStateMachine(ISO2022KR_SM_MODEL)) - self.active_sm_count = None - self._detected_charset = None - self._detected_language = None - self._state = None + self.active_sm_count = 0 + self._detected_charset: Optional[str] = None + self._detected_language: Optional[str] = None + self._state = ProbingState.DETECTING self.reset() - def reset(self): - super(EscCharSetProber, self).reset() + def reset(self) -> None: + super().reset() for coding_sm in self.coding_sm: - if not coding_sm: - continue coding_sm.active = True coding_sm.reset() self.active_sm_count = len(self.coding_sm) @@ -67,23 +71,20 @@ class EscCharSetProber(CharSetProber): self._detected_language = None @property - def charset_name(self): + def charset_name(self) -> Optional[str]: return self._detected_charset @property - def language(self): + def language(self) -> Optional[str]: return self._detected_language - def get_confidence(self): - if self._detected_charset: - return 0.99 - else: - return 0.00 + def get_confidence(self) -> float: + return 0.99 if self._detected_charset else 0.00 - def feed(self, byte_str): + def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState: for c in byte_str: for coding_sm in self.coding_sm: - if not coding_sm or not coding_sm.active: + if not coding_sm.active: continue coding_state = coding_sm.next_state(c) if coding_state == MachineState.ERROR: diff --git a/env/Lib/site-packages/pip/_vendor/chardet/escsm.py 
b/env/Lib/site-packages/pip/_vendor/chardet/escsm.py index 0069523a04969dd920ea4b43a497162157729174..11d4adf771f3f90bb5f1cc11043599b48e955c22 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/escsm.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/escsm.py @@ -12,7 +12,7 @@ # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. +# version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -20,227 +20,242 @@ # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### +from .codingstatemachinedict import CodingStateMachineDict from .enums import MachineState +# fmt: off HZ_CLS = ( -1,0,0,0,0,0,0,0, # 00 - 07 -0,0,0,0,0,0,0,0, # 08 - 0f -0,0,0,0,0,0,0,0, # 10 - 17 -0,0,0,1,0,0,0,0, # 18 - 1f -0,0,0,0,0,0,0,0, # 20 - 27 -0,0,0,0,0,0,0,0, # 28 - 2f -0,0,0,0,0,0,0,0, # 30 - 37 -0,0,0,0,0,0,0,0, # 38 - 3f -0,0,0,0,0,0,0,0, # 40 - 47 -0,0,0,0,0,0,0,0, # 48 - 4f -0,0,0,0,0,0,0,0, # 50 - 57 -0,0,0,0,0,0,0,0, # 58 - 5f -0,0,0,0,0,0,0,0, # 60 - 67 -0,0,0,0,0,0,0,0, # 68 - 6f -0,0,0,0,0,0,0,0, # 70 - 77 -0,0,0,4,0,5,2,0, # 78 - 7f -1,1,1,1,1,1,1,1, # 80 - 87 -1,1,1,1,1,1,1,1, # 88 - 8f -1,1,1,1,1,1,1,1, # 90 - 97 -1,1,1,1,1,1,1,1, # 98 - 9f -1,1,1,1,1,1,1,1, # a0 - a7 -1,1,1,1,1,1,1,1, # a8 - af -1,1,1,1,1,1,1,1, # b0 - b7 -1,1,1,1,1,1,1,1, # b8 - bf -1,1,1,1,1,1,1,1, # c0 - c7 -1,1,1,1,1,1,1,1, # c8 - cf -1,1,1,1,1,1,1,1, # d0 - d7 -1,1,1,1,1,1,1,1, # d8 - df -1,1,1,1,1,1,1,1, # e0 - e7 -1,1,1,1,1,1,1,1, # e8 - ef -1,1,1,1,1,1,1,1, # f0 - f7 -1,1,1,1,1,1,1,1, # f8 - ff + 1, 0, 0, 0, 0, 0, 0, 0, # 00 - 07 + 0, 0, 0, 0, 0, 0, 0, 0, # 08 - 0f + 0, 0, 0, 0, 0, 0, 0, 0, # 10 - 17 + 0, 0, 0, 1, 0, 0, 0, 0, # 18 - 1f + 0, 0, 0, 0, 0, 0, 0, 0, # 20 - 27 + 0, 0, 0, 0, 0, 0, 0, 0, # 28 - 2f + 0, 0, 0, 0, 0, 0, 0, 0, # 30 - 37 + 0, 0, 0, 0, 0, 0, 0, 0, # 38 - 3f + 0, 0, 0, 0, 0, 0, 0, 0, # 40 - 47 + 0, 0, 0, 0, 0, 0, 0, 0, # 48 - 4f + 0, 0, 0, 0, 0, 0, 0, 0, # 50 - 57 + 0, 0, 0, 0, 0, 0, 0, 0, # 58 - 5f + 0, 0, 0, 0, 0, 0, 0, 0, # 60 - 67 + 0, 0, 0, 0, 0, 0, 0, 0, # 68 - 6f + 0, 0, 0, 0, 0, 0, 0, 0, # 70 - 77 + 0, 0, 0, 4, 0, 5, 2, 0, # 78 - 7f + 1, 1, 1, 1, 1, 1, 1, 1, # 80 - 87 + 1, 1, 1, 1, 1, 1, 1, 1, # 88 - 8f + 1, 1, 1, 1, 1, 1, 1, 1, # 90 - 97 + 1, 1, 1, 1, 1, 1, 1, 1, # 98 - 9f + 1, 1, 1, 1, 1, 1, 1, 1, # a0 - a7 + 1, 1, 1, 1, 1, 1, 1, 1, # a8 - af + 1, 1, 1, 1, 1, 1, 1, 1, # b0 - b7 + 1, 1, 1, 1, 1, 1, 1, 1, # b8 - bf + 1, 1, 1, 1, 1, 1, 1, 1, # c0 - c7 + 1, 1, 1, 1, 1, 1, 1, 1, # c8 - cf + 1, 1, 1, 1, 1, 1, 1, 1, # d0 - d7 + 1, 1, 1, 1, 1, 1, 1, 1, # d8 - df + 1, 1, 1, 1, 1, 1, 1, 1, # e0 - e7 + 1, 1, 1, 1, 1, 1, 1, 1, # e8 - ef + 1, 1, 1, 1, 1, 1, 1, 1, # f0 - f7 + 1, 1, 1, 1, 1, 1, 1, 1, # f8 - ff ) HZ_ST = ( -MachineState.START,MachineState.ERROR, 3,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,# 00-07 
-MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 08-0f -MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START, 4,MachineState.ERROR,# 10-17 - 5,MachineState.ERROR, 6,MachineState.ERROR, 5, 5, 4,MachineState.ERROR,# 18-1f - 4,MachineState.ERROR, 4, 4, 4,MachineState.ERROR, 4,MachineState.ERROR,# 20-27 - 4,MachineState.ITS_ME,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 28-2f +MachineState.START, MachineState.ERROR, 3, MachineState.START, MachineState.START, MachineState.START, MachineState.ERROR, MachineState.ERROR, # 00-07 +MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, # 08-0f +MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ERROR, MachineState.ERROR, MachineState.START, MachineState.START, 4, MachineState.ERROR, # 10-17 + 5, MachineState.ERROR, 6, MachineState.ERROR, 5, 5, 4, MachineState.ERROR, # 18-1f + 4, MachineState.ERROR, 4, 4, 4, MachineState.ERROR, 4, MachineState.ERROR, # 20-27 + 4, MachineState.ITS_ME, MachineState.START, MachineState.START, MachineState.START, MachineState.START, MachineState.START, MachineState.START, # 28-2f ) +# fmt: on HZ_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0) -HZ_SM_MODEL = {'class_table': HZ_CLS, - 'class_factor': 6, - 'state_table': HZ_ST, - 'char_len_table': HZ_CHAR_LEN_TABLE, - 'name': "HZ-GB-2312", - 'language': 'Chinese'} +HZ_SM_MODEL: CodingStateMachineDict = { + "class_table": HZ_CLS, + "class_factor": 6, + "state_table": HZ_ST, + "char_len_table": HZ_CHAR_LEN_TABLE, + "name": "HZ-GB-2312", + "language": "Chinese", +} +# fmt: off ISO2022CN_CLS = ( -2,0,0,0,0,0,0,0, # 00 - 07 -0,0,0,0,0,0,0,0, # 08 - 0f -0,0,0,0,0,0,0,0, # 10 - 17 -0,0,0,1,0,0,0,0, # 18 - 1f -0,0,0,0,0,0,0,0, # 20 - 27 -0,3,0,0,0,0,0,0, # 28 - 2f -0,0,0,0,0,0,0,0, # 30 - 37 -0,0,0,0,0,0,0,0, # 38 - 3f -0,0,0,4,0,0,0,0, # 40 - 47 -0,0,0,0,0,0,0,0, # 48 - 4f -0,0,0,0,0,0,0,0, # 50 - 57 -0,0,0,0,0,0,0,0, # 58 - 5f -0,0,0,0,0,0,0,0, # 60 - 67 -0,0,0,0,0,0,0,0, # 68 - 6f -0,0,0,0,0,0,0,0, # 70 - 77 -0,0,0,0,0,0,0,0, # 78 - 7f -2,2,2,2,2,2,2,2, # 80 - 87 -2,2,2,2,2,2,2,2, # 88 - 8f -2,2,2,2,2,2,2,2, # 90 - 97 -2,2,2,2,2,2,2,2, # 98 - 9f -2,2,2,2,2,2,2,2, # a0 - a7 -2,2,2,2,2,2,2,2, # a8 - af -2,2,2,2,2,2,2,2, # b0 - b7 -2,2,2,2,2,2,2,2, # b8 - bf -2,2,2,2,2,2,2,2, # c0 - c7 -2,2,2,2,2,2,2,2, # c8 - cf -2,2,2,2,2,2,2,2, # d0 - d7 -2,2,2,2,2,2,2,2, # d8 - df -2,2,2,2,2,2,2,2, # e0 - e7 -2,2,2,2,2,2,2,2, # e8 - ef -2,2,2,2,2,2,2,2, # f0 - f7 -2,2,2,2,2,2,2,2, # f8 - ff + 2, 0, 0, 0, 0, 0, 0, 0, # 00 - 07 + 0, 0, 0, 0, 0, 0, 0, 0, # 08 - 0f + 0, 0, 0, 0, 0, 0, 0, 0, # 10 - 17 + 0, 0, 0, 1, 0, 0, 0, 0, # 18 - 1f + 0, 0, 0, 0, 0, 0, 0, 0, # 20 - 27 + 0, 3, 0, 0, 0, 0, 0, 0, # 28 - 2f + 0, 0, 0, 0, 0, 0, 0, 0, # 30 - 37 + 0, 0, 0, 0, 0, 0, 0, 0, # 38 - 3f + 0, 0, 0, 4, 0, 0, 0, 0, # 40 - 47 + 0, 0, 0, 0, 0, 0, 0, 0, # 48 - 4f + 0, 0, 0, 0, 0, 0, 0, 0, # 50 - 57 + 0, 0, 0, 0, 0, 0, 0, 0, # 58 - 5f + 0, 0, 0, 0, 0, 0, 0, 0, # 60 - 67 + 0, 0, 0, 0, 0, 0, 0, 0, # 68 - 6f + 0, 0, 0, 0, 0, 0, 0, 0, # 70 - 77 + 0, 0, 0, 0, 0, 0, 0, 0, # 78 - 7f + 2, 2, 2, 2, 2, 2, 2, 2, # 80 - 87 + 2, 2, 2, 2, 2, 2, 2, 2, # 88 - 8f + 2, 2, 2, 2, 2, 2, 2, 2, # 90 - 97 + 2, 2, 2, 2, 2, 2, 2, 2, # 98 - 9f + 2, 2, 2, 2, 2, 2, 2, 2, # a0 - a7 + 2, 2, 2, 2, 2, 2, 2, 
2, # a8 - af + 2, 2, 2, 2, 2, 2, 2, 2, # b0 - b7 + 2, 2, 2, 2, 2, 2, 2, 2, # b8 - bf + 2, 2, 2, 2, 2, 2, 2, 2, # c0 - c7 + 2, 2, 2, 2, 2, 2, 2, 2, # c8 - cf + 2, 2, 2, 2, 2, 2, 2, 2, # d0 - d7 + 2, 2, 2, 2, 2, 2, 2, 2, # d8 - df + 2, 2, 2, 2, 2, 2, 2, 2, # e0 - e7 + 2, 2, 2, 2, 2, 2, 2, 2, # e8 - ef + 2, 2, 2, 2, 2, 2, 2, 2, # f0 - f7 + 2, 2, 2, 2, 2, 2, 2, 2, # f8 - ff ) ISO2022CN_ST = ( -MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 00-07 -MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 08-0f -MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 10-17 -MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 4,MachineState.ERROR,# 18-1f -MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 20-27 - 5, 6,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 28-2f -MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 30-37 -MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,# 38-3f + MachineState.START, 3, MachineState.ERROR, MachineState.START, MachineState.START, MachineState.START, MachineState.START, MachineState.START, # 00-07 + MachineState.START, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, # 08-0f + MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, # 10-17 + MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, 4, MachineState.ERROR, # 18-1f + MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, # 20-27 + 5, 6, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, # 28-2f + MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, # 30-37 + MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ERROR, MachineState.START, # 38-3f ) +# fmt: on ISO2022CN_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0, 0, 0, 0) -ISO2022CN_SM_MODEL = {'class_table': ISO2022CN_CLS, - 'class_factor': 9, - 'state_table': ISO2022CN_ST, - 'char_len_table': ISO2022CN_CHAR_LEN_TABLE, - 'name': "ISO-2022-CN", - 'language': 'Chinese'} +ISO2022CN_SM_MODEL: CodingStateMachineDict = { + "class_table": ISO2022CN_CLS, + "class_factor": 9, + "state_table": ISO2022CN_ST, + "char_len_table": ISO2022CN_CHAR_LEN_TABLE, + "name": "ISO-2022-CN", + "language": "Chinese", +} +# fmt: off ISO2022JP_CLS = ( -2,0,0,0,0,0,0,0, # 00 - 07 -0,0,0,0,0,0,2,2, # 08 - 0f -0,0,0,0,0,0,0,0, # 10 - 17 
-0,0,0,1,0,0,0,0, # 18 - 1f -0,0,0,0,7,0,0,0, # 20 - 27 -3,0,0,0,0,0,0,0, # 28 - 2f -0,0,0,0,0,0,0,0, # 30 - 37 -0,0,0,0,0,0,0,0, # 38 - 3f -6,0,4,0,8,0,0,0, # 40 - 47 -0,9,5,0,0,0,0,0, # 48 - 4f -0,0,0,0,0,0,0,0, # 50 - 57 -0,0,0,0,0,0,0,0, # 58 - 5f -0,0,0,0,0,0,0,0, # 60 - 67 -0,0,0,0,0,0,0,0, # 68 - 6f -0,0,0,0,0,0,0,0, # 70 - 77 -0,0,0,0,0,0,0,0, # 78 - 7f -2,2,2,2,2,2,2,2, # 80 - 87 -2,2,2,2,2,2,2,2, # 88 - 8f -2,2,2,2,2,2,2,2, # 90 - 97 -2,2,2,2,2,2,2,2, # 98 - 9f -2,2,2,2,2,2,2,2, # a0 - a7 -2,2,2,2,2,2,2,2, # a8 - af -2,2,2,2,2,2,2,2, # b0 - b7 -2,2,2,2,2,2,2,2, # b8 - bf -2,2,2,2,2,2,2,2, # c0 - c7 -2,2,2,2,2,2,2,2, # c8 - cf -2,2,2,2,2,2,2,2, # d0 - d7 -2,2,2,2,2,2,2,2, # d8 - df -2,2,2,2,2,2,2,2, # e0 - e7 -2,2,2,2,2,2,2,2, # e8 - ef -2,2,2,2,2,2,2,2, # f0 - f7 -2,2,2,2,2,2,2,2, # f8 - ff + 2, 0, 0, 0, 0, 0, 0, 0, # 00 - 07 + 0, 0, 0, 0, 0, 0, 2, 2, # 08 - 0f + 0, 0, 0, 0, 0, 0, 0, 0, # 10 - 17 + 0, 0, 0, 1, 0, 0, 0, 0, # 18 - 1f + 0, 0, 0, 0, 7, 0, 0, 0, # 20 - 27 + 3, 0, 0, 0, 0, 0, 0, 0, # 28 - 2f + 0, 0, 0, 0, 0, 0, 0, 0, # 30 - 37 + 0, 0, 0, 0, 0, 0, 0, 0, # 38 - 3f + 6, 0, 4, 0, 8, 0, 0, 0, # 40 - 47 + 0, 9, 5, 0, 0, 0, 0, 0, # 48 - 4f + 0, 0, 0, 0, 0, 0, 0, 0, # 50 - 57 + 0, 0, 0, 0, 0, 0, 0, 0, # 58 - 5f + 0, 0, 0, 0, 0, 0, 0, 0, # 60 - 67 + 0, 0, 0, 0, 0, 0, 0, 0, # 68 - 6f + 0, 0, 0, 0, 0, 0, 0, 0, # 70 - 77 + 0, 0, 0, 0, 0, 0, 0, 0, # 78 - 7f + 2, 2, 2, 2, 2, 2, 2, 2, # 80 - 87 + 2, 2, 2, 2, 2, 2, 2, 2, # 88 - 8f + 2, 2, 2, 2, 2, 2, 2, 2, # 90 - 97 + 2, 2, 2, 2, 2, 2, 2, 2, # 98 - 9f + 2, 2, 2, 2, 2, 2, 2, 2, # a0 - a7 + 2, 2, 2, 2, 2, 2, 2, 2, # a8 - af + 2, 2, 2, 2, 2, 2, 2, 2, # b0 - b7 + 2, 2, 2, 2, 2, 2, 2, 2, # b8 - bf + 2, 2, 2, 2, 2, 2, 2, 2, # c0 - c7 + 2, 2, 2, 2, 2, 2, 2, 2, # c8 - cf + 2, 2, 2, 2, 2, 2, 2, 2, # d0 - d7 + 2, 2, 2, 2, 2, 2, 2, 2, # d8 - df + 2, 2, 2, 2, 2, 2, 2, 2, # e0 - e7 + 2, 2, 2, 2, 2, 2, 2, 2, # e8 - ef + 2, 2, 2, 2, 2, 2, 2, 2, # f0 - f7 + 2, 2, 2, 2, 2, 2, 2, 2, # f8 - ff ) ISO2022JP_ST = ( -MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 00-07 -MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 08-0f -MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 10-17 -MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,# 18-1f -MachineState.ERROR, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 4,MachineState.ERROR,MachineState.ERROR,# 20-27 -MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 6,MachineState.ITS_ME,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,# 28-2f -MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,# 30-37 -MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 38-3f -MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,MachineState.START,# 40-47 + MachineState.START, 3, MachineState.ERROR, MachineState.START, MachineState.START, MachineState.START, MachineState.START, MachineState.START, # 00-07 + 
MachineState.START, MachineState.START, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, # 08-0f + MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, # 10-17 + MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ERROR, MachineState.ERROR, # 18-1f + MachineState.ERROR, 5, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, 4, MachineState.ERROR, MachineState.ERROR, # 20-27 + MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, 6, MachineState.ITS_ME, MachineState.ERROR, MachineState.ITS_ME, MachineState.ERROR, # 28-2f + MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ITS_ME, # 30-37 + MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, # 38-3f + MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ERROR, MachineState.START, MachineState.START, # 40-47 ) +# fmt: on ISO2022JP_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0) -ISO2022JP_SM_MODEL = {'class_table': ISO2022JP_CLS, - 'class_factor': 10, - 'state_table': ISO2022JP_ST, - 'char_len_table': ISO2022JP_CHAR_LEN_TABLE, - 'name': "ISO-2022-JP", - 'language': 'Japanese'} +ISO2022JP_SM_MODEL: CodingStateMachineDict = { + "class_table": ISO2022JP_CLS, + "class_factor": 10, + "state_table": ISO2022JP_ST, + "char_len_table": ISO2022JP_CHAR_LEN_TABLE, + "name": "ISO-2022-JP", + "language": "Japanese", +} +# fmt: off ISO2022KR_CLS = ( -2,0,0,0,0,0,0,0, # 00 - 07 -0,0,0,0,0,0,0,0, # 08 - 0f -0,0,0,0,0,0,0,0, # 10 - 17 -0,0,0,1,0,0,0,0, # 18 - 1f -0,0,0,0,3,0,0,0, # 20 - 27 -0,4,0,0,0,0,0,0, # 28 - 2f -0,0,0,0,0,0,0,0, # 30 - 37 -0,0,0,0,0,0,0,0, # 38 - 3f -0,0,0,5,0,0,0,0, # 40 - 47 -0,0,0,0,0,0,0,0, # 48 - 4f -0,0,0,0,0,0,0,0, # 50 - 57 -0,0,0,0,0,0,0,0, # 58 - 5f -0,0,0,0,0,0,0,0, # 60 - 67 -0,0,0,0,0,0,0,0, # 68 - 6f -0,0,0,0,0,0,0,0, # 70 - 77 -0,0,0,0,0,0,0,0, # 78 - 7f -2,2,2,2,2,2,2,2, # 80 - 87 -2,2,2,2,2,2,2,2, # 88 - 8f -2,2,2,2,2,2,2,2, # 90 - 97 -2,2,2,2,2,2,2,2, # 98 - 9f -2,2,2,2,2,2,2,2, # a0 - a7 -2,2,2,2,2,2,2,2, # a8 - af -2,2,2,2,2,2,2,2, # b0 - b7 -2,2,2,2,2,2,2,2, # b8 - bf -2,2,2,2,2,2,2,2, # c0 - c7 -2,2,2,2,2,2,2,2, # c8 - cf -2,2,2,2,2,2,2,2, # d0 - d7 -2,2,2,2,2,2,2,2, # d8 - df -2,2,2,2,2,2,2,2, # e0 - e7 -2,2,2,2,2,2,2,2, # e8 - ef -2,2,2,2,2,2,2,2, # f0 - f7 -2,2,2,2,2,2,2,2, # f8 - ff + 2, 0, 0, 0, 0, 0, 0, 0, # 00 - 07 + 0, 0, 0, 0, 0, 0, 0, 0, # 08 - 0f + 0, 0, 0, 0, 0, 0, 0, 0, # 10 - 17 + 0, 0, 0, 1, 0, 0, 0, 0, # 18 - 1f + 0, 0, 0, 0, 3, 0, 0, 0, # 20 - 27 + 0, 4, 0, 0, 0, 0, 0, 0, # 28 - 2f + 0, 0, 0, 0, 0, 0, 0, 0, # 30 - 37 + 0, 0, 0, 0, 0, 0, 0, 0, # 38 - 3f + 0, 0, 0, 5, 0, 0, 0, 0, # 40 - 47 + 0, 0, 0, 0, 0, 0, 0, 0, # 48 - 4f + 0, 0, 0, 0, 0, 0, 0, 0, # 50 - 57 + 0, 0, 0, 0, 0, 0, 0, 0, # 58 - 5f + 0, 0, 0, 0, 0, 0, 0, 0, # 60 - 67 + 0, 0, 0, 0, 0, 0, 0, 0, # 68 - 6f + 0, 0, 0, 0, 0, 0, 0, 0, # 70 - 77 + 0, 0, 0, 0, 0, 0, 0, 0, # 78 - 7f + 2, 2, 2, 2, 2, 2, 2, 2, # 80 - 87 + 2, 2, 2, 2, 2, 2, 2, 2, # 88 - 8f + 2, 2, 2, 2, 2, 2, 2, 2, # 90 - 97 + 2, 2, 2, 2, 2, 2, 2, 2, # 98 - 9f + 2, 2, 2, 2, 2, 2, 2, 2, # a0 - a7 + 2, 2, 2, 2, 2, 2, 2, 2, # a8 - af + 2, 
2, 2, 2, 2, 2, 2, 2, # b0 - b7 + 2, 2, 2, 2, 2, 2, 2, 2, # b8 - bf + 2, 2, 2, 2, 2, 2, 2, 2, # c0 - c7 + 2, 2, 2, 2, 2, 2, 2, 2, # c8 - cf + 2, 2, 2, 2, 2, 2, 2, 2, # d0 - d7 + 2, 2, 2, 2, 2, 2, 2, 2, # d8 - df + 2, 2, 2, 2, 2, 2, 2, 2, # e0 - e7 + 2, 2, 2, 2, 2, 2, 2, 2, # e8 - ef + 2, 2, 2, 2, 2, 2, 2, 2, # f0 - f7 + 2, 2, 2, 2, 2, 2, 2, 2, # f8 - ff ) ISO2022KR_ST = ( -MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,# 00-07 -MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 08-0f -MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 4,MachineState.ERROR,MachineState.ERROR,# 10-17 -MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 18-1f -MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 20-27 + MachineState.START, 3, MachineState.ERROR, MachineState.START, MachineState.START, MachineState.START, MachineState.ERROR, MachineState.ERROR, # 00-07 + MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, # 08-0f + MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, 4, MachineState.ERROR, MachineState.ERROR, # 10-17 + MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, 5, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, # 18-1f + MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.START, MachineState.START, MachineState.START, MachineState.START, # 20-27 ) +# fmt: on ISO2022KR_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0) -ISO2022KR_SM_MODEL = {'class_table': ISO2022KR_CLS, - 'class_factor': 6, - 'state_table': ISO2022KR_ST, - 'char_len_table': ISO2022KR_CHAR_LEN_TABLE, - 'name': "ISO-2022-KR", - 'language': 'Korean'} - - +ISO2022KR_SM_MODEL: CodingStateMachineDict = { + "class_table": ISO2022KR_CLS, + "class_factor": 6, + "state_table": ISO2022KR_ST, + "char_len_table": ISO2022KR_CHAR_LEN_TABLE, + "name": "ISO-2022-KR", + "language": "Korean", +} diff --git a/env/Lib/site-packages/pip/_vendor/chardet/eucjpprober.py b/env/Lib/site-packages/pip/_vendor/chardet/eucjpprober.py index 20ce8f7d15bad9d48a3e0363de2286093a4a28cc..39487f4098d7c2068b67d7d3dd85b61848974a23 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/eucjpprober.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/eucjpprober.py @@ -25,68 +25,78 @@ # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -from .enums import ProbingState, MachineState -from .mbcharsetprober import MultiByteCharSetProber -from .codingstatemachine import CodingStateMachine +from typing import Union + from .chardistribution import EUCJPDistributionAnalysis +from .codingstatemachine import CodingStateMachine +from .enums import MachineState, ProbingState from .jpcntx import EUCJPContextAnalysis +from .mbcharsetprober import MultiByteCharSetProber from .mbcssm import EUCJP_SM_MODEL class EUCJPProber(MultiByteCharSetProber): - def __init__(self): - super(EUCJPProber, self).__init__() + def __init__(self) -> None: + super().__init__() self.coding_sm = 
CodingStateMachine(EUCJP_SM_MODEL) self.distribution_analyzer = EUCJPDistributionAnalysis() self.context_analyzer = EUCJPContextAnalysis() self.reset() - def reset(self): - super(EUCJPProber, self).reset() + def reset(self) -> None: + super().reset() self.context_analyzer.reset() @property - def charset_name(self): + def charset_name(self) -> str: return "EUC-JP" @property - def language(self): + def language(self) -> str: return "Japanese" - def feed(self, byte_str): - for i in range(len(byte_str)): - # PY3K: byte_str is a byte array, so byte_str[i] is an int, not a byte - coding_state = self.coding_sm.next_state(byte_str[i]) + def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState: + assert self.coding_sm is not None + assert self.distribution_analyzer is not None + + for i, byte in enumerate(byte_str): + # PY3K: byte_str is a byte array, so byte is an int, not a byte + coding_state = self.coding_sm.next_state(byte) if coding_state == MachineState.ERROR: - self.logger.debug('%s %s prober hit error at byte %s', - self.charset_name, self.language, i) + self.logger.debug( + "%s %s prober hit error at byte %s", + self.charset_name, + self.language, + i, + ) self._state = ProbingState.NOT_ME break - elif coding_state == MachineState.ITS_ME: + if coding_state == MachineState.ITS_ME: self._state = ProbingState.FOUND_IT break - elif coding_state == MachineState.START: + if coding_state == MachineState.START: char_len = self.coding_sm.get_current_charlen() if i == 0: - self._last_char[1] = byte_str[0] + self._last_char[1] = byte self.context_analyzer.feed(self._last_char, char_len) self.distribution_analyzer.feed(self._last_char, char_len) else: - self.context_analyzer.feed(byte_str[i - 1:i + 1], - char_len) - self.distribution_analyzer.feed(byte_str[i - 1:i + 1], - char_len) + self.context_analyzer.feed(byte_str[i - 1 : i + 1], char_len) + self.distribution_analyzer.feed(byte_str[i - 1 : i + 1], char_len) self._last_char[0] = byte_str[-1] if self.state == ProbingState.DETECTING: - if (self.context_analyzer.got_enough_data() and - (self.get_confidence() > self.SHORTCUT_THRESHOLD)): + if self.context_analyzer.got_enough_data() and ( + self.get_confidence() > self.SHORTCUT_THRESHOLD + ): self._state = ProbingState.FOUND_IT return self.state - def get_confidence(self): + def get_confidence(self) -> float: + assert self.distribution_analyzer is not None + context_conf = self.context_analyzer.get_confidence() distrib_conf = self.distribution_analyzer.get_confidence() return max(context_conf, distrib_conf) diff --git a/env/Lib/site-packages/pip/_vendor/chardet/euckrfreq.py b/env/Lib/site-packages/pip/_vendor/chardet/euckrfreq.py index b68078cb9680de4cca65b1145632a37c5e751c38..7dc3b10387d1c3d2da8b4e27e917ee2a85086e0c 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/euckrfreq.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/euckrfreq.py @@ -43,6 +43,7 @@ EUCKR_TYPICAL_DISTRIBUTION_RATIO = 6.0 EUCKR_TABLE_SIZE = 2352 # Char to FreqOrder table , +# fmt: off EUCKR_CHAR_TO_FREQ_ORDER = ( 13, 130, 120,1396, 481,1719,1720, 328, 609, 212,1721, 707, 400, 299,1722, 87, 1397,1723, 104, 536,1117,1203,1724,1267, 685,1268, 508,1725,1726,1727,1728,1398, @@ -192,4 +193,4 @@ EUCKR_CHAR_TO_FREQ_ORDER = ( 2629,2630,2631, 924, 648, 863, 603,2632,2633, 934,1540, 864, 865,2634, 642,1042, 670,1190,2635,2636,2637,2638, 168,2639, 652, 873, 542,1054,1541,2640,2641,2642, # 512, 256 ) - +# fmt: on diff --git a/env/Lib/site-packages/pip/_vendor/chardet/euckrprober.py 
b/env/Lib/site-packages/pip/_vendor/chardet/euckrprober.py index 345a060d0230ada58f769b0f6b55f43a428fc3bd..1fc5de0462cd9a09472cece4087cafe699da4fa7 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/euckrprober.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/euckrprober.py @@ -25,23 +25,23 @@ # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -from .mbcharsetprober import MultiByteCharSetProber -from .codingstatemachine import CodingStateMachine from .chardistribution import EUCKRDistributionAnalysis +from .codingstatemachine import CodingStateMachine +from .mbcharsetprober import MultiByteCharSetProber from .mbcssm import EUCKR_SM_MODEL class EUCKRProber(MultiByteCharSetProber): - def __init__(self): - super(EUCKRProber, self).__init__() + def __init__(self) -> None: + super().__init__() self.coding_sm = CodingStateMachine(EUCKR_SM_MODEL) self.distribution_analyzer = EUCKRDistributionAnalysis() self.reset() @property - def charset_name(self): + def charset_name(self) -> str: return "EUC-KR" @property - def language(self): + def language(self) -> str: return "Korean" diff --git a/env/Lib/site-packages/pip/_vendor/chardet/euctwfreq.py b/env/Lib/site-packages/pip/_vendor/chardet/euctwfreq.py index ed7a995a3aa311583efa5a47e316d4490e5f3463..4900ccc160a1dbf4de3a01c234735c21dd4417d6 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/euctwfreq.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/euctwfreq.py @@ -43,345 +43,346 @@ EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75 -# Char to FreqOrder table , +# Char to FreqOrder table EUCTW_TABLE_SIZE = 5376 +# fmt: off EUCTW_CHAR_TO_FREQ_ORDER = ( - 1,1800,1506, 255,1431, 198, 9, 82, 6,7310, 177, 202,3615,1256,2808, 110, # 2742 -3735, 33,3241, 261, 76, 44,2113, 16,2931,2184,1176, 659,3868, 26,3404,2643, # 2758 -1198,3869,3313,4060, 410,2211, 302, 590, 361,1963, 8, 204, 58,4296,7311,1931, # 2774 - 63,7312,7313, 317,1614, 75, 222, 159,4061,2412,1480,7314,3500,3068, 224,2809, # 2790 -3616, 3, 10,3870,1471, 29,2774,1135,2852,1939, 873, 130,3242,1123, 312,7315, # 2806 -4297,2051, 507, 252, 682,7316, 142,1914, 124, 206,2932, 34,3501,3173, 64, 604, # 2822 -7317,2494,1976,1977, 155,1990, 645, 641,1606,7318,3405, 337, 72, 406,7319, 80, # 2838 - 630, 238,3174,1509, 263, 939,1092,2644, 756,1440,1094,3406, 449, 69,2969, 591, # 2854 - 179,2095, 471, 115,2034,1843, 60, 50,2970, 134, 806,1868, 734,2035,3407, 180, # 2870 - 995,1607, 156, 537,2893, 688,7320, 319,1305, 779,2144, 514,2374, 298,4298, 359, # 2886 -2495, 90,2707,1338, 663, 11, 906,1099,2545, 20,2436, 182, 532,1716,7321, 732, # 2902 -1376,4062,1311,1420,3175, 25,2312,1056, 113, 399, 382,1949, 242,3408,2467, 529, # 2918 -3243, 475,1447,3617,7322, 117, 21, 656, 810,1297,2295,2329,3502,7323, 126,4063, # 2934 - 706, 456, 150, 613,4299, 71,1118,2036,4064, 145,3069, 85, 835, 486,2114,1246, # 2950 -1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,7324,2127,2354, 347,3736, 221, # 2966 -3503,3110,7325,1955,1153,4065, 83, 296,1199,3070, 192, 624, 93,7326, 822,1897, # 2982 -2810,3111, 795,2064, 991,1554,1542,1592, 27, 43,2853, 859, 139,1456, 860,4300, # 2998 - 437, 712,3871, 164,2392,3112, 695, 211,3017,2096, 195,3872,1608,3504,3505,3618, # 3014 -3873, 234, 811,2971,2097,3874,2229,1441,3506,1615,2375, 668,2076,1638, 305, 228, # 3030 -1664,4301, 467, 415,7327, 262,2098,1593, 239, 108, 300, 200,1033, 512,1247,2077, # 3046 -7328,7329,2173,3176,3619,2673, 593, 845,1062,3244, 88,1723,2037,3875,1950, 212, # 3062 - 266, 152, 149, 468,1898,4066,4302, 77, 187,7330,3018, 37, 
5,2972,7331,3876, # 3078 -7332,7333, 39,2517,4303,2894,3177,2078, 55, 148, 74,4304, 545, 483,1474,1029, # 3094 -1665, 217,1869,1531,3113,1104,2645,4067, 24, 172,3507, 900,3877,3508,3509,4305, # 3110 - 32,1408,2811,1312, 329, 487,2355,2247,2708, 784,2674, 4,3019,3314,1427,1788, # 3126 - 188, 109, 499,7334,3620,1717,1789, 888,1217,3020,4306,7335,3510,7336,3315,1520, # 3142 -3621,3878, 196,1034, 775,7337,7338, 929,1815, 249, 439, 38,7339,1063,7340, 794, # 3158 -3879,1435,2296, 46, 178,3245,2065,7341,2376,7342, 214,1709,4307, 804, 35, 707, # 3174 - 324,3622,1601,2546, 140, 459,4068,7343,7344,1365, 839, 272, 978,2257,2572,3409, # 3190 -2128,1363,3623,1423, 697, 100,3071, 48, 70,1231, 495,3114,2193,7345,1294,7346, # 3206 -2079, 462, 586,1042,3246, 853, 256, 988, 185,2377,3410,1698, 434,1084,7347,3411, # 3222 - 314,2615,2775,4308,2330,2331, 569,2280, 637,1816,2518, 757,1162,1878,1616,3412, # 3238 - 287,1577,2115, 768,4309,1671,2854,3511,2519,1321,3737, 909,2413,7348,4069, 933, # 3254 -3738,7349,2052,2356,1222,4310, 765,2414,1322, 786,4311,7350,1919,1462,1677,2895, # 3270 -1699,7351,4312,1424,2437,3115,3624,2590,3316,1774,1940,3413,3880,4070, 309,1369, # 3286 -1130,2812, 364,2230,1653,1299,3881,3512,3882,3883,2646, 525,1085,3021, 902,2000, # 3302 -1475, 964,4313, 421,1844,1415,1057,2281, 940,1364,3116, 376,4314,4315,1381, 7, # 3318 -2520, 983,2378, 336,1710,2675,1845, 321,3414, 559,1131,3022,2742,1808,1132,1313, # 3334 - 265,1481,1857,7352, 352,1203,2813,3247, 167,1089, 420,2814, 776, 792,1724,3513, # 3350 -4071,2438,3248,7353,4072,7354, 446, 229, 333,2743, 901,3739,1200,1557,4316,2647, # 3366 -1920, 395,2744,2676,3740,4073,1835, 125, 916,3178,2616,4317,7355,7356,3741,7357, # 3382 -7358,7359,4318,3117,3625,1133,2547,1757,3415,1510,2313,1409,3514,7360,2145, 438, # 3398 -2591,2896,2379,3317,1068, 958,3023, 461, 311,2855,2677,4074,1915,3179,4075,1978, # 3414 - 383, 750,2745,2617,4076, 274, 539, 385,1278,1442,7361,1154,1964, 384, 561, 210, # 3430 - 98,1295,2548,3515,7362,1711,2415,1482,3416,3884,2897,1257, 129,7363,3742, 642, # 3446 - 523,2776,2777,2648,7364, 141,2231,1333, 68, 176, 441, 876, 907,4077, 603,2592, # 3462 - 710, 171,3417, 404, 549, 18,3118,2393,1410,3626,1666,7365,3516,4319,2898,4320, # 3478 -7366,2973, 368,7367, 146, 366, 99, 871,3627,1543, 748, 807,1586,1185, 22,2258, # 3494 - 379,3743,3180,7368,3181, 505,1941,2618,1991,1382,2314,7369, 380,2357, 218, 702, # 3510 -1817,1248,3418,3024,3517,3318,3249,7370,2974,3628, 930,3250,3744,7371, 59,7372, # 3526 - 585, 601,4078, 497,3419,1112,1314,4321,1801,7373,1223,1472,2174,7374, 749,1836, # 3542 - 690,1899,3745,1772,3885,1476, 429,1043,1790,2232,2116, 917,4079, 447,1086,1629, # 3558 -7375, 556,7376,7377,2020,1654, 844,1090, 105, 550, 966,1758,2815,1008,1782, 686, # 3574 -1095,7378,2282, 793,1602,7379,3518,2593,4322,4080,2933,2297,4323,3746, 980,2496, # 3590 - 544, 353, 527,4324, 908,2678,2899,7380, 381,2619,1942,1348,7381,1341,1252, 560, # 3606 -3072,7382,3420,2856,7383,2053, 973, 886,2080, 143,4325,7384,7385, 157,3886, 496, # 3622 -4081, 57, 840, 540,2038,4326,4327,3421,2117,1445, 970,2259,1748,1965,2081,4082, # 3638 -3119,1234,1775,3251,2816,3629, 773,1206,2129,1066,2039,1326,3887,1738,1725,4083, # 3654 - 279,3120, 51,1544,2594, 423,1578,2130,2066, 173,4328,1879,7386,7387,1583, 264, # 3670 - 610,3630,4329,2439, 280, 154,7388,7389,7390,1739, 338,1282,3073, 693,2857,1411, # 3686 -1074,3747,2440,7391,4330,7392,7393,1240, 952,2394,7394,2900,1538,2679, 685,1483, # 3702 -4084,2468,1436, 953,4085,2054,4331, 671,2395, 79,4086,2441,3252, 608, 567,2680, 
# 3718 -3422,4087,4088,1691, 393,1261,1791,2396,7395,4332,7396,7397,7398,7399,1383,1672, # 3734 -3748,3182,1464, 522,1119, 661,1150, 216, 675,4333,3888,1432,3519, 609,4334,2681, # 3750 -2397,7400,7401,7402,4089,3025, 0,7403,2469, 315, 231,2442, 301,3319,4335,2380, # 3766 -7404, 233,4090,3631,1818,4336,4337,7405, 96,1776,1315,2082,7406, 257,7407,1809, # 3782 -3632,2709,1139,1819,4091,2021,1124,2163,2778,1777,2649,7408,3074, 363,1655,3183, # 3798 -7409,2975,7410,7411,7412,3889,1567,3890, 718, 103,3184, 849,1443, 341,3320,2934, # 3814 -1484,7413,1712, 127, 67, 339,4092,2398, 679,1412, 821,7414,7415, 834, 738, 351, # 3830 -2976,2146, 846, 235,1497,1880, 418,1992,3749,2710, 186,1100,2147,2746,3520,1545, # 3846 -1355,2935,2858,1377, 583,3891,4093,2573,2977,7416,1298,3633,1078,2549,3634,2358, # 3862 - 78,3750,3751, 267,1289,2099,2001,1594,4094, 348, 369,1274,2194,2175,1837,4338, # 3878 -1820,2817,3635,2747,2283,2002,4339,2936,2748, 144,3321, 882,4340,3892,2749,3423, # 3894 -4341,2901,7417,4095,1726, 320,7418,3893,3026, 788,2978,7419,2818,1773,1327,2859, # 3910 -3894,2819,7420,1306,4342,2003,1700,3752,3521,2359,2650, 787,2022, 506, 824,3636, # 3926 - 534, 323,4343,1044,3322,2023,1900, 946,3424,7421,1778,1500,1678,7422,1881,4344, # 3942 - 165, 243,4345,3637,2521, 123, 683,4096, 764,4346, 36,3895,1792, 589,2902, 816, # 3958 - 626,1667,3027,2233,1639,1555,1622,3753,3896,7423,3897,2860,1370,1228,1932, 891, # 3974 -2083,2903, 304,4097,7424, 292,2979,2711,3522, 691,2100,4098,1115,4347, 118, 662, # 3990 -7425, 611,1156, 854,2381,1316,2861, 2, 386, 515,2904,7426,7427,3253, 868,2234, # 4006 -1486, 855,2651, 785,2212,3028,7428,1040,3185,3523,7429,3121, 448,7430,1525,7431, # 4022 -2164,4348,7432,3754,7433,4099,2820,3524,3122, 503, 818,3898,3123,1568, 814, 676, # 4038 -1444, 306,1749,7434,3755,1416,1030, 197,1428, 805,2821,1501,4349,7435,7436,7437, # 4054 -1993,7438,4350,7439,7440,2195, 13,2779,3638,2980,3124,1229,1916,7441,3756,2131, # 4070 -7442,4100,4351,2399,3525,7443,2213,1511,1727,1120,7444,7445, 646,3757,2443, 307, # 4086 -7446,7447,1595,3186,7448,7449,7450,3639,1113,1356,3899,1465,2522,2523,7451, 519, # 4102 -7452, 128,2132, 92,2284,1979,7453,3900,1512, 342,3125,2196,7454,2780,2214,1980, # 4118 -3323,7455, 290,1656,1317, 789, 827,2360,7456,3758,4352, 562, 581,3901,7457, 401, # 4134 -4353,2248, 94,4354,1399,2781,7458,1463,2024,4355,3187,1943,7459, 828,1105,4101, # 4150 -1262,1394,7460,4102, 605,4356,7461,1783,2862,7462,2822, 819,2101, 578,2197,2937, # 4166 -7463,1502, 436,3254,4103,3255,2823,3902,2905,3425,3426,7464,2712,2315,7465,7466, # 4182 -2332,2067, 23,4357, 193, 826,3759,2102, 699,1630,4104,3075, 390,1793,1064,3526, # 4198 -7467,1579,3076,3077,1400,7468,4105,1838,1640,2863,7469,4358,4359, 137,4106, 598, # 4214 -3078,1966, 780, 104, 974,2938,7470, 278, 899, 253, 402, 572, 504, 493,1339,7471, # 4230 -3903,1275,4360,2574,2550,7472,3640,3029,3079,2249, 565,1334,2713, 863, 41,7473, # 4246 -7474,4361,7475,1657,2333, 19, 463,2750,4107, 606,7476,2981,3256,1087,2084,1323, # 4262 -2652,2982,7477,1631,1623,1750,4108,2682,7478,2864, 791,2714,2653,2334, 232,2416, # 4278 -7479,2983,1498,7480,2654,2620, 755,1366,3641,3257,3126,2025,1609, 119,1917,3427, # 4294 - 862,1026,4109,7481,3904,3760,4362,3905,4363,2260,1951,2470,7482,1125, 817,4110, # 4310 -4111,3906,1513,1766,2040,1487,4112,3030,3258,2824,3761,3127,7483,7484,1507,7485, # 4326 -2683, 733, 40,1632,1106,2865, 345,4113, 841,2524, 230,4364,2984,1846,3259,3428, # 4342 -7486,1263, 986,3429,7487, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562,3907, # 4358 
-3908,2939, 967,2751,2655,1349, 592,2133,1692,3324,2985,1994,4114,1679,3909,1901, # 4374 -2185,7488, 739,3642,2715,1296,1290,7489,4115,2198,2199,1921,1563,2595,2551,1870, # 4390 -2752,2986,7490, 435,7491, 343,1108, 596, 17,1751,4365,2235,3430,3643,7492,4366, # 4406 - 294,3527,2940,1693, 477, 979, 281,2041,3528, 643,2042,3644,2621,2782,2261,1031, # 4422 -2335,2134,2298,3529,4367, 367,1249,2552,7493,3530,7494,4368,1283,3325,2004, 240, # 4438 -1762,3326,4369,4370, 836,1069,3128, 474,7495,2148,2525, 268,3531,7496,3188,1521, # 4454 -1284,7497,1658,1546,4116,7498,3532,3533,7499,4117,3327,2684,1685,4118, 961,1673, # 4470 -2622, 190,2005,2200,3762,4371,4372,7500, 570,2497,3645,1490,7501,4373,2623,3260, # 4486 -1956,4374, 584,1514, 396,1045,1944,7502,4375,1967,2444,7503,7504,4376,3910, 619, # 4502 -7505,3129,3261, 215,2006,2783,2553,3189,4377,3190,4378, 763,4119,3763,4379,7506, # 4518 -7507,1957,1767,2941,3328,3646,1174, 452,1477,4380,3329,3130,7508,2825,1253,2382, # 4534 -2186,1091,2285,4120, 492,7509, 638,1169,1824,2135,1752,3911, 648, 926,1021,1324, # 4550 -4381, 520,4382, 997, 847,1007, 892,4383,3764,2262,1871,3647,7510,2400,1784,4384, # 4566 -1952,2942,3080,3191,1728,4121,2043,3648,4385,2007,1701,3131,1551, 30,2263,4122, # 4582 -7511,2026,4386,3534,7512, 501,7513,4123, 594,3431,2165,1821,3535,3432,3536,3192, # 4598 - 829,2826,4124,7514,1680,3132,1225,4125,7515,3262,4387,4126,3133,2336,7516,4388, # 4614 -4127,7517,3912,3913,7518,1847,2383,2596,3330,7519,4389, 374,3914, 652,4128,4129, # 4630 - 375,1140, 798,7520,7521,7522,2361,4390,2264, 546,1659, 138,3031,2445,4391,7523, # 4646 -2250, 612,1848, 910, 796,3765,1740,1371, 825,3766,3767,7524,2906,2554,7525, 692, # 4662 - 444,3032,2624, 801,4392,4130,7526,1491, 244,1053,3033,4131,4132, 340,7527,3915, # 4678 -1041,2987, 293,1168, 87,1357,7528,1539, 959,7529,2236, 721, 694,4133,3768, 219, # 4694 -1478, 644,1417,3331,2656,1413,1401,1335,1389,3916,7530,7531,2988,2362,3134,1825, # 4710 - 730,1515, 184,2827, 66,4393,7532,1660,2943, 246,3332, 378,1457, 226,3433, 975, # 4726 -3917,2944,1264,3537, 674, 696,7533, 163,7534,1141,2417,2166, 713,3538,3333,4394, # 4742 -3918,7535,7536,1186, 15,7537,1079,1070,7538,1522,3193,3539, 276,1050,2716, 758, # 4758 -1126, 653,2945,3263,7539,2337, 889,3540,3919,3081,2989, 903,1250,4395,3920,3434, # 4774 -3541,1342,1681,1718, 766,3264, 286, 89,2946,3649,7540,1713,7541,2597,3334,2990, # 4790 -7542,2947,2215,3194,2866,7543,4396,2498,2526, 181, 387,1075,3921, 731,2187,3335, # 4806 -7544,3265, 310, 313,3435,2299, 770,4134, 54,3034, 189,4397,3082,3769,3922,7545, # 4822 -1230,1617,1849, 355,3542,4135,4398,3336, 111,4136,3650,1350,3135,3436,3035,4137, # 4838 -2149,3266,3543,7546,2784,3923,3924,2991, 722,2008,7547,1071, 247,1207,2338,2471, # 4854 -1378,4399,2009, 864,1437,1214,4400, 373,3770,1142,2216, 667,4401, 442,2753,2555, # 4870 -3771,3925,1968,4138,3267,1839, 837, 170,1107, 934,1336,1882,7548,7549,2118,4139, # 4886 -2828, 743,1569,7550,4402,4140, 582,2384,1418,3437,7551,1802,7552, 357,1395,1729, # 4902 -3651,3268,2418,1564,2237,7553,3083,3772,1633,4403,1114,2085,4141,1532,7554, 482, # 4918 -2446,4404,7555,7556,1492, 833,1466,7557,2717,3544,1641,2829,7558,1526,1272,3652, # 4934 -4142,1686,1794, 416,2556,1902,1953,1803,7559,3773,2785,3774,1159,2316,7560,2867, # 4950 -4405,1610,1584,3036,2419,2754, 443,3269,1163,3136,7561,7562,3926,7563,4143,2499, # 4966 -3037,4406,3927,3137,2103,1647,3545,2010,1872,4144,7564,4145, 431,3438,7565, 250, # 4982 - 97, 81,4146,7566,1648,1850,1558, 160, 848,7567, 866, 740,1694,7568,2201,2830, # 4998 
-3195,4147,4407,3653,1687, 950,2472, 426, 469,3196,3654,3655,3928,7569,7570,1188, # 5014 - 424,1995, 861,3546,4148,3775,2202,2685, 168,1235,3547,4149,7571,2086,1674,4408, # 5030 -3337,3270, 220,2557,1009,7572,3776, 670,2992, 332,1208, 717,7573,7574,3548,2447, # 5046 -3929,3338,7575, 513,7576,1209,2868,3339,3138,4409,1080,7577,7578,7579,7580,2527, # 5062 -3656,3549, 815,1587,3930,3931,7581,3550,3439,3777,1254,4410,1328,3038,1390,3932, # 5078 -1741,3933,3778,3934,7582, 236,3779,2448,3271,7583,7584,3657,3780,1273,3781,4411, # 5094 -7585, 308,7586,4412, 245,4413,1851,2473,1307,2575, 430, 715,2136,2449,7587, 270, # 5110 - 199,2869,3935,7588,3551,2718,1753, 761,1754, 725,1661,1840,4414,3440,3658,7589, # 5126 -7590, 587, 14,3272, 227,2598, 326, 480,2265, 943,2755,3552, 291, 650,1883,7591, # 5142 -1702,1226, 102,1547, 62,3441, 904,4415,3442,1164,4150,7592,7593,1224,1548,2756, # 5158 - 391, 498,1493,7594,1386,1419,7595,2055,1177,4416, 813, 880,1081,2363, 566,1145, # 5174 -4417,2286,1001,1035,2558,2599,2238, 394,1286,7596,7597,2068,7598, 86,1494,1730, # 5190 -3936, 491,1588, 745, 897,2948, 843,3340,3937,2757,2870,3273,1768, 998,2217,2069, # 5206 - 397,1826,1195,1969,3659,2993,3341, 284,7599,3782,2500,2137,2119,1903,7600,3938, # 5222 -2150,3939,4151,1036,3443,1904, 114,2559,4152, 209,1527,7601,7602,2949,2831,2625, # 5238 -2385,2719,3139, 812,2560,7603,3274,7604,1559, 737,1884,3660,1210, 885, 28,2686, # 5254 -3553,3783,7605,4153,1004,1779,4418,7606, 346,1981,2218,2687,4419,3784,1742, 797, # 5270 -1642,3940,1933,1072,1384,2151, 896,3941,3275,3661,3197,2871,3554,7607,2561,1958, # 5286 -4420,2450,1785,7608,7609,7610,3942,4154,1005,1308,3662,4155,2720,4421,4422,1528, # 5302 -2600, 161,1178,4156,1982, 987,4423,1101,4157, 631,3943,1157,3198,2420,1343,1241, # 5318 -1016,2239,2562, 372, 877,2339,2501,1160, 555,1934, 911,3944,7611, 466,1170, 169, # 5334 -1051,2907,2688,3663,2474,2994,1182,2011,2563,1251,2626,7612, 992,2340,3444,1540, # 5350 -2721,1201,2070,2401,1996,2475,7613,4424, 528,1922,2188,1503,1873,1570,2364,3342, # 5366 -3276,7614, 557,1073,7615,1827,3445,2087,2266,3140,3039,3084, 767,3085,2786,4425, # 5382 -1006,4158,4426,2341,1267,2176,3664,3199, 778,3945,3200,2722,1597,2657,7616,4427, # 5398 -7617,3446,7618,7619,7620,3277,2689,1433,3278, 131, 95,1504,3946, 723,4159,3141, # 5414 -1841,3555,2758,2189,3947,2027,2104,3665,7621,2995,3948,1218,7622,3343,3201,3949, # 5430 -4160,2576, 248,1634,3785, 912,7623,2832,3666,3040,3786, 654, 53,7624,2996,7625, # 5446 -1688,4428, 777,3447,1032,3950,1425,7626, 191, 820,2120,2833, 971,4429, 931,3202, # 5462 - 135, 664, 783,3787,1997, 772,2908,1935,3951,3788,4430,2909,3203, 282,2723, 640, # 5478 -1372,3448,1127, 922, 325,3344,7627,7628, 711,2044,7629,7630,3952,2219,2787,1936, # 5494 -3953,3345,2220,2251,3789,2300,7631,4431,3790,1258,3279,3954,3204,2138,2950,3955, # 5510 -3956,7632,2221, 258,3205,4432, 101,1227,7633,3280,1755,7634,1391,3281,7635,2910, # 5526 -2056, 893,7636,7637,7638,1402,4161,2342,7639,7640,3206,3556,7641,7642, 878,1325, # 5542 -1780,2788,4433, 259,1385,2577, 744,1183,2267,4434,7643,3957,2502,7644, 684,1024, # 5558 -4162,7645, 472,3557,3449,1165,3282,3958,3959, 322,2152, 881, 455,1695,1152,1340, # 5574 - 660, 554,2153,4435,1058,4436,4163, 830,1065,3346,3960,4437,1923,7646,1703,1918, # 5590 -7647, 932,2268, 122,7648,4438, 947, 677,7649,3791,2627, 297,1905,1924,2269,4439, # 5606 -2317,3283,7650,7651,4164,7652,4165, 84,4166, 112, 989,7653, 547,1059,3961, 701, # 5622 -3558,1019,7654,4167,7655,3450, 942, 639, 457,2301,2451, 993,2951, 407, 851, 494, # 5638 
-4440,3347, 927,7656,1237,7657,2421,3348, 573,4168, 680, 921,2911,1279,1874, 285, # 5654 - 790,1448,1983, 719,2167,7658,7659,4441,3962,3963,1649,7660,1541, 563,7661,1077, # 5670 -7662,3349,3041,3451, 511,2997,3964,3965,3667,3966,1268,2564,3350,3207,4442,4443, # 5686 -7663, 535,1048,1276,1189,2912,2028,3142,1438,1373,2834,2952,1134,2012,7664,4169, # 5702 -1238,2578,3086,1259,7665, 700,7666,2953,3143,3668,4170,7667,4171,1146,1875,1906, # 5718 -4444,2601,3967, 781,2422, 132,1589, 203, 147, 273,2789,2402, 898,1786,2154,3968, # 5734 -3969,7668,3792,2790,7669,7670,4445,4446,7671,3208,7672,1635,3793, 965,7673,1804, # 5750 -2690,1516,3559,1121,1082,1329,3284,3970,1449,3794, 65,1128,2835,2913,2759,1590, # 5766 -3795,7674,7675, 12,2658, 45, 976,2579,3144,4447, 517,2528,1013,1037,3209,7676, # 5782 -3796,2836,7677,3797,7678,3452,7679,2602, 614,1998,2318,3798,3087,2724,2628,7680, # 5798 -2580,4172, 599,1269,7681,1810,3669,7682,2691,3088, 759,1060, 489,1805,3351,3285, # 5814 -1358,7683,7684,2386,1387,1215,2629,2252, 490,7685,7686,4173,1759,2387,2343,7687, # 5830 -4448,3799,1907,3971,2630,1806,3210,4449,3453,3286,2760,2344, 874,7688,7689,3454, # 5846 -3670,1858, 91,2914,3671,3042,3800,4450,7690,3145,3972,2659,7691,3455,1202,1403, # 5862 -3801,2954,2529,1517,2503,4451,3456,2504,7692,4452,7693,2692,1885,1495,1731,3973, # 5878 -2365,4453,7694,2029,7695,7696,3974,2693,1216, 237,2581,4174,2319,3975,3802,4454, # 5894 -4455,2694,3560,3457, 445,4456,7697,7698,7699,7700,2761, 61,3976,3672,1822,3977, # 5910 -7701, 687,2045, 935, 925, 405,2660, 703,1096,1859,2725,4457,3978,1876,1367,2695, # 5926 -3352, 918,2105,1781,2476, 334,3287,1611,1093,4458, 564,3146,3458,3673,3353, 945, # 5942 -2631,2057,4459,7702,1925, 872,4175,7703,3459,2696,3089, 349,4176,3674,3979,4460, # 5958 -3803,4177,3675,2155,3980,4461,4462,4178,4463,2403,2046, 782,3981, 400, 251,4179, # 5974 -1624,7704,7705, 277,3676, 299,1265, 476,1191,3804,2121,4180,4181,1109, 205,7706, # 5990 -2582,1000,2156,3561,1860,7707,7708,7709,4464,7710,4465,2565, 107,2477,2157,3982, # 6006 -3460,3147,7711,1533, 541,1301, 158, 753,4182,2872,3562,7712,1696, 370,1088,4183, # 6022 -4466,3563, 579, 327, 440, 162,2240, 269,1937,1374,3461, 968,3043, 56,1396,3090, # 6038 -2106,3288,3354,7713,1926,2158,4467,2998,7714,3564,7715,7716,3677,4468,2478,7717, # 6054 -2791,7718,1650,4469,7719,2603,7720,7721,3983,2661,3355,1149,3356,3984,3805,3985, # 6070 -7722,1076, 49,7723, 951,3211,3289,3290, 450,2837, 920,7724,1811,2792,2366,4184, # 6086 -1908,1138,2367,3806,3462,7725,3212,4470,1909,1147,1518,2423,4471,3807,7726,4472, # 6102 -2388,2604, 260,1795,3213,7727,7728,3808,3291, 708,7729,3565,1704,7730,3566,1351, # 6118 -1618,3357,2999,1886, 944,4185,3358,4186,3044,3359,4187,7731,3678, 422, 413,1714, # 6134 -3292, 500,2058,2345,4188,2479,7732,1344,1910, 954,7733,1668,7734,7735,3986,2404, # 6150 -4189,3567,3809,4190,7736,2302,1318,2505,3091, 133,3092,2873,4473, 629, 31,2838, # 6166 -2697,3810,4474, 850, 949,4475,3987,2955,1732,2088,4191,1496,1852,7737,3988, 620, # 6182 -3214, 981,1242,3679,3360,1619,3680,1643,3293,2139,2452,1970,1719,3463,2168,7738, # 6198 -3215,7739,7740,3361,1828,7741,1277,4476,1565,2047,7742,1636,3568,3093,7743, 869, # 6214 -2839, 655,3811,3812,3094,3989,3000,3813,1310,3569,4477,7744,7745,7746,1733, 558, # 6230 -4478,3681, 335,1549,3045,1756,4192,3682,1945,3464,1829,1291,1192, 470,2726,2107, # 6246 -2793, 913,1054,3990,7747,1027,7748,3046,3991,4479, 982,2662,3362,3148,3465,3216, # 6262 -3217,1946,2794,7749, 571,4480,7750,1830,7751,3570,2583,1523,2424,7752,2089, 984, # 6278 
-4481,3683,1959,7753,3684, 852, 923,2795,3466,3685, 969,1519, 999,2048,2320,1705, # 6294 -7754,3095, 615,1662, 151, 597,3992,2405,2321,1049, 275,4482,3686,4193, 568,3687, # 6310 -3571,2480,4194,3688,7755,2425,2270, 409,3218,7756,1566,2874,3467,1002, 769,2840, # 6326 - 194,2090,3149,3689,2222,3294,4195, 628,1505,7757,7758,1763,2177,3001,3993, 521, # 6342 -1161,2584,1787,2203,2406,4483,3994,1625,4196,4197, 412, 42,3096, 464,7759,2632, # 6358 -4484,3363,1760,1571,2875,3468,2530,1219,2204,3814,2633,2140,2368,4485,4486,3295, # 6374 -1651,3364,3572,7760,7761,3573,2481,3469,7762,3690,7763,7764,2271,2091, 460,7765, # 6390 -4487,7766,3002, 962, 588,3574, 289,3219,2634,1116, 52,7767,3047,1796,7768,7769, # 6406 -7770,1467,7771,1598,1143,3691,4198,1984,1734,1067,4488,1280,3365, 465,4489,1572, # 6422 - 510,7772,1927,2241,1812,1644,3575,7773,4490,3692,7774,7775,2663,1573,1534,7776, # 6438 -7777,4199, 536,1807,1761,3470,3815,3150,2635,7778,7779,7780,4491,3471,2915,1911, # 6454 -2796,7781,3296,1122, 377,3220,7782, 360,7783,7784,4200,1529, 551,7785,2059,3693, # 6470 -1769,2426,7786,2916,4201,3297,3097,2322,2108,2030,4492,1404, 136,1468,1479, 672, # 6486 -1171,3221,2303, 271,3151,7787,2762,7788,2049, 678,2727, 865,1947,4493,7789,2013, # 6502 -3995,2956,7790,2728,2223,1397,3048,3694,4494,4495,1735,2917,3366,3576,7791,3816, # 6518 - 509,2841,2453,2876,3817,7792,7793,3152,3153,4496,4202,2531,4497,2304,1166,1010, # 6534 - 552, 681,1887,7794,7795,2957,2958,3996,1287,1596,1861,3154, 358, 453, 736, 175, # 6550 - 478,1117, 905,1167,1097,7796,1853,1530,7797,1706,7798,2178,3472,2287,3695,3473, # 6566 -3577,4203,2092,4204,7799,3367,1193,2482,4205,1458,2190,2205,1862,1888,1421,3298, # 6582 -2918,3049,2179,3474, 595,2122,7800,3997,7801,7802,4206,1707,2636, 223,3696,1359, # 6598 - 751,3098, 183,3475,7803,2797,3003, 419,2369, 633, 704,3818,2389, 241,7804,7805, # 6614 -7806, 838,3004,3697,2272,2763,2454,3819,1938,2050,3998,1309,3099,2242,1181,7807, # 6630 -1136,2206,3820,2370,1446,4207,2305,4498,7808,7809,4208,1055,2605, 484,3698,7810, # 6646 -3999, 625,4209,2273,3368,1499,4210,4000,7811,4001,4211,3222,2274,2275,3476,7812, # 6662 -7813,2764, 808,2606,3699,3369,4002,4212,3100,2532, 526,3370,3821,4213, 955,7814, # 6678 -1620,4214,2637,2427,7815,1429,3700,1669,1831, 994, 928,7816,3578,1260,7817,7818, # 6694 -7819,1948,2288, 741,2919,1626,4215,2729,2455, 867,1184, 362,3371,1392,7820,7821, # 6710 -4003,4216,1770,1736,3223,2920,4499,4500,1928,2698,1459,1158,7822,3050,3372,2877, # 6726 -1292,1929,2506,2842,3701,1985,1187,2071,2014,2607,4217,7823,2566,2507,2169,3702, # 6742 -2483,3299,7824,3703,4501,7825,7826, 666,1003,3005,1022,3579,4218,7827,4502,1813, # 6758 -2253, 574,3822,1603, 295,1535, 705,3823,4219, 283, 858, 417,7828,7829,3224,4503, # 6774 -4504,3051,1220,1889,1046,2276,2456,4004,1393,1599, 689,2567, 388,4220,7830,2484, # 6790 - 802,7831,2798,3824,2060,1405,2254,7832,4505,3825,2109,1052,1345,3225,1585,7833, # 6806 - 809,7834,7835,7836, 575,2730,3477, 956,1552,1469,1144,2323,7837,2324,1560,2457, # 6822 -3580,3226,4005, 616,2207,3155,2180,2289,7838,1832,7839,3478,4506,7840,1319,3704, # 6838 -3705,1211,3581,1023,3227,1293,2799,7841,7842,7843,3826, 607,2306,3827, 762,2878, # 6854 -1439,4221,1360,7844,1485,3052,7845,4507,1038,4222,1450,2061,2638,4223,1379,4508, # 6870 -2585,7846,7847,4224,1352,1414,2325,2921,1172,7848,7849,3828,3829,7850,1797,1451, # 6886 -7851,7852,7853,7854,2922,4006,4007,2485,2346, 411,4008,4009,3582,3300,3101,4509, # 6902 -1561,2664,1452,4010,1375,7855,7856, 47,2959, 316,7857,1406,1591,2923,3156,7858, # 
6918 -1025,2141,3102,3157, 354,2731, 884,2224,4225,2407, 508,3706, 726,3583, 996,2428, # 6934 -3584, 729,7859, 392,2191,1453,4011,4510,3707,7860,7861,2458,3585,2608,1675,2800, # 6950 - 919,2347,2960,2348,1270,4511,4012, 73,7862,7863, 647,7864,3228,2843,2255,1550, # 6966 -1346,3006,7865,1332, 883,3479,7866,7867,7868,7869,3301,2765,7870,1212, 831,1347, # 6982 -4226,4512,2326,3830,1863,3053, 720,3831,4513,4514,3832,7871,4227,7872,7873,4515, # 6998 -7874,7875,1798,4516,3708,2609,4517,3586,1645,2371,7876,7877,2924, 669,2208,2665, # 7014 -2429,7878,2879,7879,7880,1028,3229,7881,4228,2408,7882,2256,1353,7883,7884,4518, # 7030 -3158, 518,7885,4013,7886,4229,1960,7887,2142,4230,7888,7889,3007,2349,2350,3833, # 7046 - 516,1833,1454,4014,2699,4231,4519,2225,2610,1971,1129,3587,7890,2766,7891,2961, # 7062 -1422, 577,1470,3008,1524,3373,7892,7893, 432,4232,3054,3480,7894,2586,1455,2508, # 7078 -2226,1972,1175,7895,1020,2732,4015,3481,4520,7896,2733,7897,1743,1361,3055,3482, # 7094 -2639,4016,4233,4521,2290, 895, 924,4234,2170, 331,2243,3056, 166,1627,3057,1098, # 7110 -7898,1232,2880,2227,3374,4522, 657, 403,1196,2372, 542,3709,3375,1600,4235,3483, # 7126 -7899,4523,2767,3230, 576, 530,1362,7900,4524,2533,2666,3710,4017,7901, 842,3834, # 7142 -7902,2801,2031,1014,4018, 213,2700,3376, 665, 621,4236,7903,3711,2925,2430,7904, # 7158 -2431,3302,3588,3377,7905,4237,2534,4238,4525,3589,1682,4239,3484,1380,7906, 724, # 7174 -2277, 600,1670,7907,1337,1233,4526,3103,2244,7908,1621,4527,7909, 651,4240,7910, # 7190 -1612,4241,2611,7911,2844,7912,2734,2307,3058,7913, 716,2459,3059, 174,1255,2701, # 7206 -4019,3590, 548,1320,1398, 728,4020,1574,7914,1890,1197,3060,4021,7915,3061,3062, # 7222 -3712,3591,3713, 747,7916, 635,4242,4528,7917,7918,7919,4243,7920,7921,4529,7922, # 7238 -3378,4530,2432, 451,7923,3714,2535,2072,4244,2735,4245,4022,7924,1764,4531,7925, # 7254 -4246, 350,7926,2278,2390,2486,7927,4247,4023,2245,1434,4024, 488,4532, 458,4248, # 7270 -4025,3715, 771,1330,2391,3835,2568,3159,2159,2409,1553,2667,3160,4249,7928,2487, # 7286 -2881,2612,1720,2702,4250,3379,4533,7929,2536,4251,7930,3231,4252,2768,7931,2015, # 7302 -2736,7932,1155,1017,3716,3836,7933,3303,2308, 201,1864,4253,1430,7934,4026,7935, # 7318 -7936,7937,7938,7939,4254,1604,7940, 414,1865, 371,2587,4534,4535,3485,2016,3104, # 7334 -4536,1708, 960,4255, 887, 389,2171,1536,1663,1721,7941,2228,4027,2351,2926,1580, # 7350 -7942,7943,7944,1744,7945,2537,4537,4538,7946,4539,7947,2073,7948,7949,3592,3380, # 7366 -2882,4256,7950,4257,2640,3381,2802, 673,2703,2460, 709,3486,4028,3593,4258,7951, # 7382 -1148, 502, 634,7952,7953,1204,4540,3594,1575,4541,2613,3717,7954,3718,3105, 948, # 7398 -3232, 121,1745,3837,1110,7955,4259,3063,2509,3009,4029,3719,1151,1771,3838,1488, # 7414 -4030,1986,7956,2433,3487,7957,7958,2093,7959,4260,3839,1213,1407,2803, 531,2737, # 7430 -2538,3233,1011,1537,7960,2769,4261,3106,1061,7961,3720,3721,1866,2883,7962,2017, # 7446 - 120,4262,4263,2062,3595,3234,2309,3840,2668,3382,1954,4542,7963,7964,3488,1047, # 7462 -2704,1266,7965,1368,4543,2845, 649,3383,3841,2539,2738,1102,2846,2669,7966,7967, # 7478 -1999,7968,1111,3596,2962,7969,2488,3842,3597,2804,1854,3384,3722,7970,7971,3385, # 7494 -2410,2884,3304,3235,3598,7972,2569,7973,3599,2805,4031,1460, 856,7974,3600,7975, # 7510 -2885,2963,7976,2886,3843,7977,4264, 632,2510, 875,3844,1697,3845,2291,7978,7979, # 7526 -4544,3010,1239, 580,4545,4265,7980, 914, 936,2074,1190,4032,1039,2123,7981,7982, # 7542 -7983,3386,1473,7984,1354,4266,3846,7985,2172,3064,4033, 
915,3305,4267,4268,3306, # 7558 -1605,1834,7986,2739, 398,3601,4269,3847,4034, 328,1912,2847,4035,3848,1331,4270, # 7574 -3011, 937,4271,7987,3602,4036,4037,3387,2160,4546,3388, 524, 742, 538,3065,1012, # 7590 -7988,7989,3849,2461,7990, 658,1103, 225,3850,7991,7992,4547,7993,4548,7994,3236, # 7606 -1243,7995,4038, 963,2246,4549,7996,2705,3603,3161,7997,7998,2588,2327,7999,4550, # 7622 -8000,8001,8002,3489,3307, 957,3389,2540,2032,1930,2927,2462, 870,2018,3604,1746, # 7638 -2770,2771,2434,2463,8003,3851,8004,3723,3107,3724,3490,3390,3725,8005,1179,3066, # 7654 -8006,3162,2373,4272,3726,2541,3163,3108,2740,4039,8007,3391,1556,2542,2292, 977, # 7670 -2887,2033,4040,1205,3392,8008,1765,3393,3164,2124,1271,1689, 714,4551,3491,8009, # 7686 -2328,3852, 533,4273,3605,2181, 617,8010,2464,3308,3492,2310,8011,8012,3165,8013, # 7702 -8014,3853,1987, 618, 427,2641,3493,3394,8015,8016,1244,1690,8017,2806,4274,4552, # 7718 -8018,3494,8019,8020,2279,1576, 473,3606,4275,3395, 972,8021,3607,8022,3067,8023, # 7734 -8024,4553,4554,8025,3727,4041,4042,8026, 153,4555, 356,8027,1891,2888,4276,2143, # 7750 - 408, 803,2352,8028,3854,8029,4277,1646,2570,2511,4556,4557,3855,8030,3856,4278, # 7766 -8031,2411,3396, 752,8032,8033,1961,2964,8034, 746,3012,2465,8035,4279,3728, 698, # 7782 -4558,1892,4280,3608,2543,4559,3609,3857,8036,3166,3397,8037,1823,1302,4043,2706, # 7798 -3858,1973,4281,8038,4282,3167, 823,1303,1288,1236,2848,3495,4044,3398, 774,3859, # 7814 -8039,1581,4560,1304,2849,3860,4561,8040,2435,2161,1083,3237,4283,4045,4284, 344, # 7830 -1173, 288,2311, 454,1683,8041,8042,1461,4562,4046,2589,8043,8044,4563, 985, 894, # 7846 -8045,3399,3168,8046,1913,2928,3729,1988,8047,2110,1974,8048,4047,8049,2571,1194, # 7862 - 425,8050,4564,3169,1245,3730,4285,8051,8052,2850,8053, 636,4565,1855,3861, 760, # 7878 -1799,8054,4286,2209,1508,4566,4048,1893,1684,2293,8055,8056,8057,4287,4288,2210, # 7894 - 479,8058,8059, 832,8060,4049,2489,8061,2965,2490,3731, 990,3109, 627,1814,2642, # 7910 -4289,1582,4290,2125,2111,3496,4567,8062, 799,4291,3170,8063,4568,2112,1737,3013, # 7926 -1018, 543, 754,4292,3309,1676,4569,4570,4050,8064,1489,8065,3497,8066,2614,2889, # 7942 -4051,8067,8068,2966,8069,8070,8071,8072,3171,4571,4572,2182,1722,8073,3238,3239, # 7958 -1842,3610,1715, 481, 365,1975,1856,8074,8075,1962,2491,4573,8076,2126,3611,3240, # 7974 - 433,1894,2063,2075,8077, 602,2741,8078,8079,8080,8081,8082,3014,1628,3400,8083, # 7990 -3172,4574,4052,2890,4575,2512,8084,2544,2772,8085,8086,8087,3310,4576,2891,8088, # 8006 -4577,8089,2851,4578,4579,1221,2967,4053,2513,8090,8091,8092,1867,1989,8093,8094, # 8022 -8095,1895,8096,8097,4580,1896,4054, 318,8098,2094,4055,4293,8099,8100, 485,8101, # 8038 - 938,3862, 553,2670, 116,8102,3863,3612,8103,3498,2671,2773,3401,3311,2807,8104, # 8054 -3613,2929,4056,1747,2930,2968,8105,8106, 207,8107,8108,2672,4581,2514,8109,3015, # 8070 - 890,3614,3864,8110,1877,3732,3402,8111,2183,2353,3403,1652,8112,8113,8114, 941, # 8086 -2294, 208,3499,4057,2019, 330,4294,3865,2892,2492,3733,4295,8115,8116,8117,8118, # 8102 + 1, 1800, 1506, 255, 1431, 198, 9, 82, 6, 7310, 177, 202, 3615, 1256, 2808, 110, # 2742 + 3735, 33, 3241, 261, 76, 44, 2113, 16, 2931, 2184, 1176, 659, 3868, 26, 3404, 2643, # 2758 + 1198, 3869, 3313, 4060, 410, 2211, 302, 590, 361, 1963, 8, 204, 58, 4296, 7311, 1931, # 2774 + 63, 7312, 7313, 317, 1614, 75, 222, 159, 4061, 2412, 1480, 7314, 3500, 3068, 224, 2809, # 2790 + 3616, 3, 10, 3870, 1471, 29, 2774, 1135, 2852, 1939, 873, 130, 3242, 1123, 312, 7315, # 2806 + 4297, 2051, 507, 252, 
682, 7316, 142, 1914, 124, 206, 2932, 34, 3501, 3173, 64, 604, # 2822 + 7317, 2494, 1976, 1977, 155, 1990, 645, 641, 1606, 7318, 3405, 337, 72, 406, 7319, 80, # 2838 + 630, 238, 3174, 1509, 263, 939, 1092, 2644, 756, 1440, 1094, 3406, 449, 69, 2969, 591, # 2854 + 179, 2095, 471, 115, 2034, 1843, 60, 50, 2970, 134, 806, 1868, 734, 2035, 3407, 180, # 2870 + 995, 1607, 156, 537, 2893, 688, 7320, 319, 1305, 779, 2144, 514, 2374, 298, 4298, 359, # 2886 + 2495, 90, 2707, 1338, 663, 11, 906, 1099, 2545, 20, 2436, 182, 532, 1716, 7321, 732, # 2902 + 1376, 4062, 1311, 1420, 3175, 25, 2312, 1056, 113, 399, 382, 1949, 242, 3408, 2467, 529, # 2918 + 3243, 475, 1447, 3617, 7322, 117, 21, 656, 810, 1297, 2295, 2329, 3502, 7323, 126, 4063, # 2934 + 706, 456, 150, 613, 4299, 71, 1118, 2036, 4064, 145, 3069, 85, 835, 486, 2114, 1246, # 2950 + 1426, 428, 727, 1285, 1015, 800, 106, 623, 303, 1281, 7324, 2127, 2354, 347, 3736, 221, # 2966 + 3503, 3110, 7325, 1955, 1153, 4065, 83, 296, 1199, 3070, 192, 624, 93, 7326, 822, 1897, # 2982 + 2810, 3111, 795, 2064, 991, 1554, 1542, 1592, 27, 43, 2853, 859, 139, 1456, 860, 4300, # 2998 + 437, 712, 3871, 164, 2392, 3112, 695, 211, 3017, 2096, 195, 3872, 1608, 3504, 3505, 3618, # 3014 + 3873, 234, 811, 2971, 2097, 3874, 2229, 1441, 3506, 1615, 2375, 668, 2076, 1638, 305, 228, # 3030 + 1664, 4301, 467, 415, 7327, 262, 2098, 1593, 239, 108, 300, 200, 1033, 512, 1247, 2077, # 3046 + 7328, 7329, 2173, 3176, 3619, 2673, 593, 845, 1062, 3244, 88, 1723, 2037, 3875, 1950, 212, # 3062 + 266, 152, 149, 468, 1898, 4066, 4302, 77, 187, 7330, 3018, 37, 5, 2972, 7331, 3876, # 3078 + 7332, 7333, 39, 2517, 4303, 2894, 3177, 2078, 55, 148, 74, 4304, 545, 483, 1474, 1029, # 3094 + 1665, 217, 1869, 1531, 3113, 1104, 2645, 4067, 24, 172, 3507, 900, 3877, 3508, 3509, 4305, # 3110 + 32, 1408, 2811, 1312, 329, 487, 2355, 2247, 2708, 784, 2674, 4, 3019, 3314, 1427, 1788, # 3126 + 188, 109, 499, 7334, 3620, 1717, 1789, 888, 1217, 3020, 4306, 7335, 3510, 7336, 3315, 1520, # 3142 + 3621, 3878, 196, 1034, 775, 7337, 7338, 929, 1815, 249, 439, 38, 7339, 1063, 7340, 794, # 3158 + 3879, 1435, 2296, 46, 178, 3245, 2065, 7341, 2376, 7342, 214, 1709, 4307, 804, 35, 707, # 3174 + 324, 3622, 1601, 2546, 140, 459, 4068, 7343, 7344, 1365, 839, 272, 978, 2257, 2572, 3409, # 3190 + 2128, 1363, 3623, 1423, 697, 100, 3071, 48, 70, 1231, 495, 3114, 2193, 7345, 1294, 7346, # 3206 + 2079, 462, 586, 1042, 3246, 853, 256, 988, 185, 2377, 3410, 1698, 434, 1084, 7347, 3411, # 3222 + 314, 2615, 2775, 4308, 2330, 2331, 569, 2280, 637, 1816, 2518, 757, 1162, 1878, 1616, 3412, # 3238 + 287, 1577, 2115, 768, 4309, 1671, 2854, 3511, 2519, 1321, 3737, 909, 2413, 7348, 4069, 933, # 3254 + 3738, 7349, 2052, 2356, 1222, 4310, 765, 2414, 1322, 786, 4311, 7350, 1919, 1462, 1677, 2895, # 3270 + 1699, 7351, 4312, 1424, 2437, 3115, 3624, 2590, 3316, 1774, 1940, 3413, 3880, 4070, 309, 1369, # 3286 + 1130, 2812, 364, 2230, 1653, 1299, 3881, 3512, 3882, 3883, 2646, 525, 1085, 3021, 902, 2000, # 3302 + 1475, 964, 4313, 421, 1844, 1415, 1057, 2281, 940, 1364, 3116, 376, 4314, 4315, 1381, 7, # 3318 + 2520, 983, 2378, 336, 1710, 2675, 1845, 321, 3414, 559, 1131, 3022, 2742, 1808, 1132, 1313, # 3334 + 265, 1481, 1857, 7352, 352, 1203, 2813, 3247, 167, 1089, 420, 2814, 776, 792, 1724, 3513, # 3350 + 4071, 2438, 3248, 7353, 4072, 7354, 446, 229, 333, 2743, 901, 3739, 1200, 1557, 4316, 2647, # 3366 + 1920, 395, 2744, 2676, 3740, 4073, 1835, 125, 916, 3178, 2616, 4317, 7355, 7356, 3741, 7357, # 3382 + 7358, 7359, 4318, 3117, 3625, 1133, 2547, 
1757, 3415, 1510, 2313, 1409, 3514, 7360, 2145, 438, # 3398 + 2591, 2896, 2379, 3317, 1068, 958, 3023, 461, 311, 2855, 2677, 4074, 1915, 3179, 4075, 1978, # 3414 + 383, 750, 2745, 2617, 4076, 274, 539, 385, 1278, 1442, 7361, 1154, 1964, 384, 561, 210, # 3430 + 98, 1295, 2548, 3515, 7362, 1711, 2415, 1482, 3416, 3884, 2897, 1257, 129, 7363, 3742, 642, # 3446 + 523, 2776, 2777, 2648, 7364, 141, 2231, 1333, 68, 176, 441, 876, 907, 4077, 603, 2592, # 3462 + 710, 171, 3417, 404, 549, 18, 3118, 2393, 1410, 3626, 1666, 7365, 3516, 4319, 2898, 4320, # 3478 + 7366, 2973, 368, 7367, 146, 366, 99, 871, 3627, 1543, 748, 807, 1586, 1185, 22, 2258, # 3494 + 379, 3743, 3180, 7368, 3181, 505, 1941, 2618, 1991, 1382, 2314, 7369, 380, 2357, 218, 702, # 3510 + 1817, 1248, 3418, 3024, 3517, 3318, 3249, 7370, 2974, 3628, 930, 3250, 3744, 7371, 59, 7372, # 3526 + 585, 601, 4078, 497, 3419, 1112, 1314, 4321, 1801, 7373, 1223, 1472, 2174, 7374, 749, 1836, # 3542 + 690, 1899, 3745, 1772, 3885, 1476, 429, 1043, 1790, 2232, 2116, 917, 4079, 447, 1086, 1629, # 3558 + 7375, 556, 7376, 7377, 2020, 1654, 844, 1090, 105, 550, 966, 1758, 2815, 1008, 1782, 686, # 3574 + 1095, 7378, 2282, 793, 1602, 7379, 3518, 2593, 4322, 4080, 2933, 2297, 4323, 3746, 980, 2496, # 3590 + 544, 353, 527, 4324, 908, 2678, 2899, 7380, 381, 2619, 1942, 1348, 7381, 1341, 1252, 560, # 3606 + 3072, 7382, 3420, 2856, 7383, 2053, 973, 886, 2080, 143, 4325, 7384, 7385, 157, 3886, 496, # 3622 + 4081, 57, 840, 540, 2038, 4326, 4327, 3421, 2117, 1445, 970, 2259, 1748, 1965, 2081, 4082, # 3638 + 3119, 1234, 1775, 3251, 2816, 3629, 773, 1206, 2129, 1066, 2039, 1326, 3887, 1738, 1725, 4083, # 3654 + 279, 3120, 51, 1544, 2594, 423, 1578, 2130, 2066, 173, 4328, 1879, 7386, 7387, 1583, 264, # 3670 + 610, 3630, 4329, 2439, 280, 154, 7388, 7389, 7390, 1739, 338, 1282, 3073, 693, 2857, 1411, # 3686 + 1074, 3747, 2440, 7391, 4330, 7392, 7393, 1240, 952, 2394, 7394, 2900, 1538, 2679, 685, 1483, # 3702 + 4084, 2468, 1436, 953, 4085, 2054, 4331, 671, 2395, 79, 4086, 2441, 3252, 608, 567, 2680, # 3718 + 3422, 4087, 4088, 1691, 393, 1261, 1791, 2396, 7395, 4332, 7396, 7397, 7398, 7399, 1383, 1672, # 3734 + 3748, 3182, 1464, 522, 1119, 661, 1150, 216, 675, 4333, 3888, 1432, 3519, 609, 4334, 2681, # 3750 + 2397, 7400, 7401, 7402, 4089, 3025, 0, 7403, 2469, 315, 231, 2442, 301, 3319, 4335, 2380, # 3766 + 7404, 233, 4090, 3631, 1818, 4336, 4337, 7405, 96, 1776, 1315, 2082, 7406, 257, 7407, 1809, # 3782 + 3632, 2709, 1139, 1819, 4091, 2021, 1124, 2163, 2778, 1777, 2649, 7408, 3074, 363, 1655, 3183, # 3798 + 7409, 2975, 7410, 7411, 7412, 3889, 1567, 3890, 718, 103, 3184, 849, 1443, 341, 3320, 2934, # 3814 + 1484, 7413, 1712, 127, 67, 339, 4092, 2398, 679, 1412, 821, 7414, 7415, 834, 738, 351, # 3830 + 2976, 2146, 846, 235, 1497, 1880, 418, 1992, 3749, 2710, 186, 1100, 2147, 2746, 3520, 1545, # 3846 + 1355, 2935, 2858, 1377, 583, 3891, 4093, 2573, 2977, 7416, 1298, 3633, 1078, 2549, 3634, 2358, # 3862 + 78, 3750, 3751, 267, 1289, 2099, 2001, 1594, 4094, 348, 369, 1274, 2194, 2175, 1837, 4338, # 3878 + 1820, 2817, 3635, 2747, 2283, 2002, 4339, 2936, 2748, 144, 3321, 882, 4340, 3892, 2749, 3423, # 3894 + 4341, 2901, 7417, 4095, 1726, 320, 7418, 3893, 3026, 788, 2978, 7419, 2818, 1773, 1327, 2859, # 3910 + 3894, 2819, 7420, 1306, 4342, 2003, 1700, 3752, 3521, 2359, 2650, 787, 2022, 506, 824, 3636, # 3926 + 534, 323, 4343, 1044, 3322, 2023, 1900, 946, 3424, 7421, 1778, 1500, 1678, 7422, 1881, 4344, # 3942 + 165, 243, 4345, 3637, 2521, 123, 683, 4096, 764, 4346, 36, 3895, 1792, 
589, 2902, 816, # 3958 + 626, 1667, 3027, 2233, 1639, 1555, 1622, 3753, 3896, 7423, 3897, 2860, 1370, 1228, 1932, 891, # 3974 + 2083, 2903, 304, 4097, 7424, 292, 2979, 2711, 3522, 691, 2100, 4098, 1115, 4347, 118, 662, # 3990 + 7425, 611, 1156, 854, 2381, 1316, 2861, 2, 386, 515, 2904, 7426, 7427, 3253, 868, 2234, # 4006 + 1486, 855, 2651, 785, 2212, 3028, 7428, 1040, 3185, 3523, 7429, 3121, 448, 7430, 1525, 7431, # 4022 + 2164, 4348, 7432, 3754, 7433, 4099, 2820, 3524, 3122, 503, 818, 3898, 3123, 1568, 814, 676, # 4038 + 1444, 306, 1749, 7434, 3755, 1416, 1030, 197, 1428, 805, 2821, 1501, 4349, 7435, 7436, 7437, # 4054 + 1993, 7438, 4350, 7439, 7440, 2195, 13, 2779, 3638, 2980, 3124, 1229, 1916, 7441, 3756, 2131, # 4070 + 7442, 4100, 4351, 2399, 3525, 7443, 2213, 1511, 1727, 1120, 7444, 7445, 646, 3757, 2443, 307, # 4086 + 7446, 7447, 1595, 3186, 7448, 7449, 7450, 3639, 1113, 1356, 3899, 1465, 2522, 2523, 7451, 519, # 4102 + 7452, 128, 2132, 92, 2284, 1979, 7453, 3900, 1512, 342, 3125, 2196, 7454, 2780, 2214, 1980, # 4118 + 3323, 7455, 290, 1656, 1317, 789, 827, 2360, 7456, 3758, 4352, 562, 581, 3901, 7457, 401, # 4134 + 4353, 2248, 94, 4354, 1399, 2781, 7458, 1463, 2024, 4355, 3187, 1943, 7459, 828, 1105, 4101, # 4150 + 1262, 1394, 7460, 4102, 605, 4356, 7461, 1783, 2862, 7462, 2822, 819, 2101, 578, 2197, 2937, # 4166 + 7463, 1502, 436, 3254, 4103, 3255, 2823, 3902, 2905, 3425, 3426, 7464, 2712, 2315, 7465, 7466, # 4182 + 2332, 2067, 23, 4357, 193, 826, 3759, 2102, 699, 1630, 4104, 3075, 390, 1793, 1064, 3526, # 4198 + 7467, 1579, 3076, 3077, 1400, 7468, 4105, 1838, 1640, 2863, 7469, 4358, 4359, 137, 4106, 598, # 4214 + 3078, 1966, 780, 104, 974, 2938, 7470, 278, 899, 253, 402, 572, 504, 493, 1339, 7471, # 4230 + 3903, 1275, 4360, 2574, 2550, 7472, 3640, 3029, 3079, 2249, 565, 1334, 2713, 863, 41, 7473, # 4246 + 7474, 4361, 7475, 1657, 2333, 19, 463, 2750, 4107, 606, 7476, 2981, 3256, 1087, 2084, 1323, # 4262 + 2652, 2982, 7477, 1631, 1623, 1750, 4108, 2682, 7478, 2864, 791, 2714, 2653, 2334, 232, 2416, # 4278 + 7479, 2983, 1498, 7480, 2654, 2620, 755, 1366, 3641, 3257, 3126, 2025, 1609, 119, 1917, 3427, # 4294 + 862, 1026, 4109, 7481, 3904, 3760, 4362, 3905, 4363, 2260, 1951, 2470, 7482, 1125, 817, 4110, # 4310 + 4111, 3906, 1513, 1766, 2040, 1487, 4112, 3030, 3258, 2824, 3761, 3127, 7483, 7484, 1507, 7485, # 4326 + 2683, 733, 40, 1632, 1106, 2865, 345, 4113, 841, 2524, 230, 4364, 2984, 1846, 3259, 3428, # 4342 + 7486, 1263, 986, 3429, 7487, 735, 879, 254, 1137, 857, 622, 1300, 1180, 1388, 1562, 3907, # 4358 + 3908, 2939, 967, 2751, 2655, 1349, 592, 2133, 1692, 3324, 2985, 1994, 4114, 1679, 3909, 1901, # 4374 + 2185, 7488, 739, 3642, 2715, 1296, 1290, 7489, 4115, 2198, 2199, 1921, 1563, 2595, 2551, 1870, # 4390 + 2752, 2986, 7490, 435, 7491, 343, 1108, 596, 17, 1751, 4365, 2235, 3430, 3643, 7492, 4366, # 4406 + 294, 3527, 2940, 1693, 477, 979, 281, 2041, 3528, 643, 2042, 3644, 2621, 2782, 2261, 1031, # 4422 + 2335, 2134, 2298, 3529, 4367, 367, 1249, 2552, 7493, 3530, 7494, 4368, 1283, 3325, 2004, 240, # 4438 + 1762, 3326, 4369, 4370, 836, 1069, 3128, 474, 7495, 2148, 2525, 268, 3531, 7496, 3188, 1521, # 4454 + 1284, 7497, 1658, 1546, 4116, 7498, 3532, 3533, 7499, 4117, 3327, 2684, 1685, 4118, 961, 1673, # 4470 + 2622, 190, 2005, 2200, 3762, 4371, 4372, 7500, 570, 2497, 3645, 1490, 7501, 4373, 2623, 3260, # 4486 + 1956, 4374, 584, 1514, 396, 1045, 1944, 7502, 4375, 1967, 2444, 7503, 7504, 4376, 3910, 619, # 4502 + 7505, 3129, 3261, 215, 2006, 2783, 2553, 3189, 4377, 3190, 4378, 763, 
4119, 3763, 4379, 7506, # 4518 + 7507, 1957, 1767, 2941, 3328, 3646, 1174, 452, 1477, 4380, 3329, 3130, 7508, 2825, 1253, 2382, # 4534 + 2186, 1091, 2285, 4120, 492, 7509, 638, 1169, 1824, 2135, 1752, 3911, 648, 926, 1021, 1324, # 4550 + 4381, 520, 4382, 997, 847, 1007, 892, 4383, 3764, 2262, 1871, 3647, 7510, 2400, 1784, 4384, # 4566 + 1952, 2942, 3080, 3191, 1728, 4121, 2043, 3648, 4385, 2007, 1701, 3131, 1551, 30, 2263, 4122, # 4582 + 7511, 2026, 4386, 3534, 7512, 501, 7513, 4123, 594, 3431, 2165, 1821, 3535, 3432, 3536, 3192, # 4598 + 829, 2826, 4124, 7514, 1680, 3132, 1225, 4125, 7515, 3262, 4387, 4126, 3133, 2336, 7516, 4388, # 4614 + 4127, 7517, 3912, 3913, 7518, 1847, 2383, 2596, 3330, 7519, 4389, 374, 3914, 652, 4128, 4129, # 4630 + 375, 1140, 798, 7520, 7521, 7522, 2361, 4390, 2264, 546, 1659, 138, 3031, 2445, 4391, 7523, # 4646 + 2250, 612, 1848, 910, 796, 3765, 1740, 1371, 825, 3766, 3767, 7524, 2906, 2554, 7525, 692, # 4662 + 444, 3032, 2624, 801, 4392, 4130, 7526, 1491, 244, 1053, 3033, 4131, 4132, 340, 7527, 3915, # 4678 + 1041, 2987, 293, 1168, 87, 1357, 7528, 1539, 959, 7529, 2236, 721, 694, 4133, 3768, 219, # 4694 + 1478, 644, 1417, 3331, 2656, 1413, 1401, 1335, 1389, 3916, 7530, 7531, 2988, 2362, 3134, 1825, # 4710 + 730, 1515, 184, 2827, 66, 4393, 7532, 1660, 2943, 246, 3332, 378, 1457, 226, 3433, 975, # 4726 + 3917, 2944, 1264, 3537, 674, 696, 7533, 163, 7534, 1141, 2417, 2166, 713, 3538, 3333, 4394, # 4742 + 3918, 7535, 7536, 1186, 15, 7537, 1079, 1070, 7538, 1522, 3193, 3539, 276, 1050, 2716, 758, # 4758 + 1126, 653, 2945, 3263, 7539, 2337, 889, 3540, 3919, 3081, 2989, 903, 1250, 4395, 3920, 3434, # 4774 + 3541, 1342, 1681, 1718, 766, 3264, 286, 89, 2946, 3649, 7540, 1713, 7541, 2597, 3334, 2990, # 4790 + 7542, 2947, 2215, 3194, 2866, 7543, 4396, 2498, 2526, 181, 387, 1075, 3921, 731, 2187, 3335, # 4806 + 7544, 3265, 310, 313, 3435, 2299, 770, 4134, 54, 3034, 189, 4397, 3082, 3769, 3922, 7545, # 4822 + 1230, 1617, 1849, 355, 3542, 4135, 4398, 3336, 111, 4136, 3650, 1350, 3135, 3436, 3035, 4137, # 4838 + 2149, 3266, 3543, 7546, 2784, 3923, 3924, 2991, 722, 2008, 7547, 1071, 247, 1207, 2338, 2471, # 4854 + 1378, 4399, 2009, 864, 1437, 1214, 4400, 373, 3770, 1142, 2216, 667, 4401, 442, 2753, 2555, # 4870 + 3771, 3925, 1968, 4138, 3267, 1839, 837, 170, 1107, 934, 1336, 1882, 7548, 7549, 2118, 4139, # 4886 + 2828, 743, 1569, 7550, 4402, 4140, 582, 2384, 1418, 3437, 7551, 1802, 7552, 357, 1395, 1729, # 4902 + 3651, 3268, 2418, 1564, 2237, 7553, 3083, 3772, 1633, 4403, 1114, 2085, 4141, 1532, 7554, 482, # 4918 + 2446, 4404, 7555, 7556, 1492, 833, 1466, 7557, 2717, 3544, 1641, 2829, 7558, 1526, 1272, 3652, # 4934 + 4142, 1686, 1794, 416, 2556, 1902, 1953, 1803, 7559, 3773, 2785, 3774, 1159, 2316, 7560, 2867, # 4950 + 4405, 1610, 1584, 3036, 2419, 2754, 443, 3269, 1163, 3136, 7561, 7562, 3926, 7563, 4143, 2499, # 4966 + 3037, 4406, 3927, 3137, 2103, 1647, 3545, 2010, 1872, 4144, 7564, 4145, 431, 3438, 7565, 250, # 4982 + 97, 81, 4146, 7566, 1648, 1850, 1558, 160, 848, 7567, 866, 740, 1694, 7568, 2201, 2830, # 4998 + 3195, 4147, 4407, 3653, 1687, 950, 2472, 426, 469, 3196, 3654, 3655, 3928, 7569, 7570, 1188, # 5014 + 424, 1995, 861, 3546, 4148, 3775, 2202, 2685, 168, 1235, 3547, 4149, 7571, 2086, 1674, 4408, # 5030 + 3337, 3270, 220, 2557, 1009, 7572, 3776, 670, 2992, 332, 1208, 717, 7573, 7574, 3548, 2447, # 5046 + 3929, 3338, 7575, 513, 7576, 1209, 2868, 3339, 3138, 4409, 1080, 7577, 7578, 7579, 7580, 2527, # 5062 + 3656, 3549, 815, 1587, 3930, 3931, 7581, 3550, 3439, 3777, 
1254, 4410, 1328, 3038, 1390, 3932, # 5078 + 1741, 3933, 3778, 3934, 7582, 236, 3779, 2448, 3271, 7583, 7584, 3657, 3780, 1273, 3781, 4411, # 5094 + 7585, 308, 7586, 4412, 245, 4413, 1851, 2473, 1307, 2575, 430, 715, 2136, 2449, 7587, 270, # 5110 + 199, 2869, 3935, 7588, 3551, 2718, 1753, 761, 1754, 725, 1661, 1840, 4414, 3440, 3658, 7589, # 5126 + 7590, 587, 14, 3272, 227, 2598, 326, 480, 2265, 943, 2755, 3552, 291, 650, 1883, 7591, # 5142 + 1702, 1226, 102, 1547, 62, 3441, 904, 4415, 3442, 1164, 4150, 7592, 7593, 1224, 1548, 2756, # 5158 + 391, 498, 1493, 7594, 1386, 1419, 7595, 2055, 1177, 4416, 813, 880, 1081, 2363, 566, 1145, # 5174 + 4417, 2286, 1001, 1035, 2558, 2599, 2238, 394, 1286, 7596, 7597, 2068, 7598, 86, 1494, 1730, # 5190 + 3936, 491, 1588, 745, 897, 2948, 843, 3340, 3937, 2757, 2870, 3273, 1768, 998, 2217, 2069, # 5206 + 397, 1826, 1195, 1969, 3659, 2993, 3341, 284, 7599, 3782, 2500, 2137, 2119, 1903, 7600, 3938, # 5222 + 2150, 3939, 4151, 1036, 3443, 1904, 114, 2559, 4152, 209, 1527, 7601, 7602, 2949, 2831, 2625, # 5238 + 2385, 2719, 3139, 812, 2560, 7603, 3274, 7604, 1559, 737, 1884, 3660, 1210, 885, 28, 2686, # 5254 + 3553, 3783, 7605, 4153, 1004, 1779, 4418, 7606, 346, 1981, 2218, 2687, 4419, 3784, 1742, 797, # 5270 + 1642, 3940, 1933, 1072, 1384, 2151, 896, 3941, 3275, 3661, 3197, 2871, 3554, 7607, 2561, 1958, # 5286 + 4420, 2450, 1785, 7608, 7609, 7610, 3942, 4154, 1005, 1308, 3662, 4155, 2720, 4421, 4422, 1528, # 5302 + 2600, 161, 1178, 4156, 1982, 987, 4423, 1101, 4157, 631, 3943, 1157, 3198, 2420, 1343, 1241, # 5318 + 1016, 2239, 2562, 372, 877, 2339, 2501, 1160, 555, 1934, 911, 3944, 7611, 466, 1170, 169, # 5334 + 1051, 2907, 2688, 3663, 2474, 2994, 1182, 2011, 2563, 1251, 2626, 7612, 992, 2340, 3444, 1540, # 5350 + 2721, 1201, 2070, 2401, 1996, 2475, 7613, 4424, 528, 1922, 2188, 1503, 1873, 1570, 2364, 3342, # 5366 + 3276, 7614, 557, 1073, 7615, 1827, 3445, 2087, 2266, 3140, 3039, 3084, 767, 3085, 2786, 4425, # 5382 + 1006, 4158, 4426, 2341, 1267, 2176, 3664, 3199, 778, 3945, 3200, 2722, 1597, 2657, 7616, 4427, # 5398 + 7617, 3446, 7618, 7619, 7620, 3277, 2689, 1433, 3278, 131, 95, 1504, 3946, 723, 4159, 3141, # 5414 + 1841, 3555, 2758, 2189, 3947, 2027, 2104, 3665, 7621, 2995, 3948, 1218, 7622, 3343, 3201, 3949, # 5430 + 4160, 2576, 248, 1634, 3785, 912, 7623, 2832, 3666, 3040, 3786, 654, 53, 7624, 2996, 7625, # 5446 + 1688, 4428, 777, 3447, 1032, 3950, 1425, 7626, 191, 820, 2120, 2833, 971, 4429, 931, 3202, # 5462 + 135, 664, 783, 3787, 1997, 772, 2908, 1935, 3951, 3788, 4430, 2909, 3203, 282, 2723, 640, # 5478 + 1372, 3448, 1127, 922, 325, 3344, 7627, 7628, 711, 2044, 7629, 7630, 3952, 2219, 2787, 1936, # 5494 + 3953, 3345, 2220, 2251, 3789, 2300, 7631, 4431, 3790, 1258, 3279, 3954, 3204, 2138, 2950, 3955, # 5510 + 3956, 7632, 2221, 258, 3205, 4432, 101, 1227, 7633, 3280, 1755, 7634, 1391, 3281, 7635, 2910, # 5526 + 2056, 893, 7636, 7637, 7638, 1402, 4161, 2342, 7639, 7640, 3206, 3556, 7641, 7642, 878, 1325, # 5542 + 1780, 2788, 4433, 259, 1385, 2577, 744, 1183, 2267, 4434, 7643, 3957, 2502, 7644, 684, 1024, # 5558 + 4162, 7645, 472, 3557, 3449, 1165, 3282, 3958, 3959, 322, 2152, 881, 455, 1695, 1152, 1340, # 5574 + 660, 554, 2153, 4435, 1058, 4436, 4163, 830, 1065, 3346, 3960, 4437, 1923, 7646, 1703, 1918, # 5590 + 7647, 932, 2268, 122, 7648, 4438, 947, 677, 7649, 3791, 2627, 297, 1905, 1924, 2269, 4439, # 5606 + 2317, 3283, 7650, 7651, 4164, 7652, 4165, 84, 4166, 112, 989, 7653, 547, 1059, 3961, 701, # 5622 + 3558, 1019, 7654, 4167, 7655, 3450, 942, 639, 
457, 2301, 2451, 993, 2951, 407, 851, 494, # 5638 + 4440, 3347, 927, 7656, 1237, 7657, 2421, 3348, 573, 4168, 680, 921, 2911, 1279, 1874, 285, # 5654 + 790, 1448, 1983, 719, 2167, 7658, 7659, 4441, 3962, 3963, 1649, 7660, 1541, 563, 7661, 1077, # 5670 + 7662, 3349, 3041, 3451, 511, 2997, 3964, 3965, 3667, 3966, 1268, 2564, 3350, 3207, 4442, 4443, # 5686 + 7663, 535, 1048, 1276, 1189, 2912, 2028, 3142, 1438, 1373, 2834, 2952, 1134, 2012, 7664, 4169, # 5702 + 1238, 2578, 3086, 1259, 7665, 700, 7666, 2953, 3143, 3668, 4170, 7667, 4171, 1146, 1875, 1906, # 5718 + 4444, 2601, 3967, 781, 2422, 132, 1589, 203, 147, 273, 2789, 2402, 898, 1786, 2154, 3968, # 5734 + 3969, 7668, 3792, 2790, 7669, 7670, 4445, 4446, 7671, 3208, 7672, 1635, 3793, 965, 7673, 1804, # 5750 + 2690, 1516, 3559, 1121, 1082, 1329, 3284, 3970, 1449, 3794, 65, 1128, 2835, 2913, 2759, 1590, # 5766 + 3795, 7674, 7675, 12, 2658, 45, 976, 2579, 3144, 4447, 517, 2528, 1013, 1037, 3209, 7676, # 5782 + 3796, 2836, 7677, 3797, 7678, 3452, 7679, 2602, 614, 1998, 2318, 3798, 3087, 2724, 2628, 7680, # 5798 + 2580, 4172, 599, 1269, 7681, 1810, 3669, 7682, 2691, 3088, 759, 1060, 489, 1805, 3351, 3285, # 5814 + 1358, 7683, 7684, 2386, 1387, 1215, 2629, 2252, 490, 7685, 7686, 4173, 1759, 2387, 2343, 7687, # 5830 + 4448, 3799, 1907, 3971, 2630, 1806, 3210, 4449, 3453, 3286, 2760, 2344, 874, 7688, 7689, 3454, # 5846 + 3670, 1858, 91, 2914, 3671, 3042, 3800, 4450, 7690, 3145, 3972, 2659, 7691, 3455, 1202, 1403, # 5862 + 3801, 2954, 2529, 1517, 2503, 4451, 3456, 2504, 7692, 4452, 7693, 2692, 1885, 1495, 1731, 3973, # 5878 + 2365, 4453, 7694, 2029, 7695, 7696, 3974, 2693, 1216, 237, 2581, 4174, 2319, 3975, 3802, 4454, # 5894 + 4455, 2694, 3560, 3457, 445, 4456, 7697, 7698, 7699, 7700, 2761, 61, 3976, 3672, 1822, 3977, # 5910 + 7701, 687, 2045, 935, 925, 405, 2660, 703, 1096, 1859, 2725, 4457, 3978, 1876, 1367, 2695, # 5926 + 3352, 918, 2105, 1781, 2476, 334, 3287, 1611, 1093, 4458, 564, 3146, 3458, 3673, 3353, 945, # 5942 + 2631, 2057, 4459, 7702, 1925, 872, 4175, 7703, 3459, 2696, 3089, 349, 4176, 3674, 3979, 4460, # 5958 + 3803, 4177, 3675, 2155, 3980, 4461, 4462, 4178, 4463, 2403, 2046, 782, 3981, 400, 251, 4179, # 5974 + 1624, 7704, 7705, 277, 3676, 299, 1265, 476, 1191, 3804, 2121, 4180, 4181, 1109, 205, 7706, # 5990 + 2582, 1000, 2156, 3561, 1860, 7707, 7708, 7709, 4464, 7710, 4465, 2565, 107, 2477, 2157, 3982, # 6006 + 3460, 3147, 7711, 1533, 541, 1301, 158, 753, 4182, 2872, 3562, 7712, 1696, 370, 1088, 4183, # 6022 + 4466, 3563, 579, 327, 440, 162, 2240, 269, 1937, 1374, 3461, 968, 3043, 56, 1396, 3090, # 6038 + 2106, 3288, 3354, 7713, 1926, 2158, 4467, 2998, 7714, 3564, 7715, 7716, 3677, 4468, 2478, 7717, # 6054 + 2791, 7718, 1650, 4469, 7719, 2603, 7720, 7721, 3983, 2661, 3355, 1149, 3356, 3984, 3805, 3985, # 6070 + 7722, 1076, 49, 7723, 951, 3211, 3289, 3290, 450, 2837, 920, 7724, 1811, 2792, 2366, 4184, # 6086 + 1908, 1138, 2367, 3806, 3462, 7725, 3212, 4470, 1909, 1147, 1518, 2423, 4471, 3807, 7726, 4472, # 6102 + 2388, 2604, 260, 1795, 3213, 7727, 7728, 3808, 3291, 708, 7729, 3565, 1704, 7730, 3566, 1351, # 6118 + 1618, 3357, 2999, 1886, 944, 4185, 3358, 4186, 3044, 3359, 4187, 7731, 3678, 422, 413, 1714, # 6134 + 3292, 500, 2058, 2345, 4188, 2479, 7732, 1344, 1910, 954, 7733, 1668, 7734, 7735, 3986, 2404, # 6150 + 4189, 3567, 3809, 4190, 7736, 2302, 1318, 2505, 3091, 133, 3092, 2873, 4473, 629, 31, 2838, # 6166 + 2697, 3810, 4474, 850, 949, 4475, 3987, 2955, 1732, 2088, 4191, 1496, 1852, 7737, 3988, 620, # 6182 + 3214, 981, 1242, 
3679, 3360, 1619, 3680, 1643, 3293, 2139, 2452, 1970, 1719, 3463, 2168, 7738, # 6198 + 3215, 7739, 7740, 3361, 1828, 7741, 1277, 4476, 1565, 2047, 7742, 1636, 3568, 3093, 7743, 869, # 6214 + 2839, 655, 3811, 3812, 3094, 3989, 3000, 3813, 1310, 3569, 4477, 7744, 7745, 7746, 1733, 558, # 6230 + 4478, 3681, 335, 1549, 3045, 1756, 4192, 3682, 1945, 3464, 1829, 1291, 1192, 470, 2726, 2107, # 6246 + 2793, 913, 1054, 3990, 7747, 1027, 7748, 3046, 3991, 4479, 982, 2662, 3362, 3148, 3465, 3216, # 6262 + 3217, 1946, 2794, 7749, 571, 4480, 7750, 1830, 7751, 3570, 2583, 1523, 2424, 7752, 2089, 984, # 6278 + 4481, 3683, 1959, 7753, 3684, 852, 923, 2795, 3466, 3685, 969, 1519, 999, 2048, 2320, 1705, # 6294 + 7754, 3095, 615, 1662, 151, 597, 3992, 2405, 2321, 1049, 275, 4482, 3686, 4193, 568, 3687, # 6310 + 3571, 2480, 4194, 3688, 7755, 2425, 2270, 409, 3218, 7756, 1566, 2874, 3467, 1002, 769, 2840, # 6326 + 194, 2090, 3149, 3689, 2222, 3294, 4195, 628, 1505, 7757, 7758, 1763, 2177, 3001, 3993, 521, # 6342 + 1161, 2584, 1787, 2203, 2406, 4483, 3994, 1625, 4196, 4197, 412, 42, 3096, 464, 7759, 2632, # 6358 + 4484, 3363, 1760, 1571, 2875, 3468, 2530, 1219, 2204, 3814, 2633, 2140, 2368, 4485, 4486, 3295, # 6374 + 1651, 3364, 3572, 7760, 7761, 3573, 2481, 3469, 7762, 3690, 7763, 7764, 2271, 2091, 460, 7765, # 6390 + 4487, 7766, 3002, 962, 588, 3574, 289, 3219, 2634, 1116, 52, 7767, 3047, 1796, 7768, 7769, # 6406 + 7770, 1467, 7771, 1598, 1143, 3691, 4198, 1984, 1734, 1067, 4488, 1280, 3365, 465, 4489, 1572, # 6422 + 510, 7772, 1927, 2241, 1812, 1644, 3575, 7773, 4490, 3692, 7774, 7775, 2663, 1573, 1534, 7776, # 6438 + 7777, 4199, 536, 1807, 1761, 3470, 3815, 3150, 2635, 7778, 7779, 7780, 4491, 3471, 2915, 1911, # 6454 + 2796, 7781, 3296, 1122, 377, 3220, 7782, 360, 7783, 7784, 4200, 1529, 551, 7785, 2059, 3693, # 6470 + 1769, 2426, 7786, 2916, 4201, 3297, 3097, 2322, 2108, 2030, 4492, 1404, 136, 1468, 1479, 672, # 6486 + 1171, 3221, 2303, 271, 3151, 7787, 2762, 7788, 2049, 678, 2727, 865, 1947, 4493, 7789, 2013, # 6502 + 3995, 2956, 7790, 2728, 2223, 1397, 3048, 3694, 4494, 4495, 1735, 2917, 3366, 3576, 7791, 3816, # 6518 + 509, 2841, 2453, 2876, 3817, 7792, 7793, 3152, 3153, 4496, 4202, 2531, 4497, 2304, 1166, 1010, # 6534 + 552, 681, 1887, 7794, 7795, 2957, 2958, 3996, 1287, 1596, 1861, 3154, 358, 453, 736, 175, # 6550 + 478, 1117, 905, 1167, 1097, 7796, 1853, 1530, 7797, 1706, 7798, 2178, 3472, 2287, 3695, 3473, # 6566 + 3577, 4203, 2092, 4204, 7799, 3367, 1193, 2482, 4205, 1458, 2190, 2205, 1862, 1888, 1421, 3298, # 6582 + 2918, 3049, 2179, 3474, 595, 2122, 7800, 3997, 7801, 7802, 4206, 1707, 2636, 223, 3696, 1359, # 6598 + 751, 3098, 183, 3475, 7803, 2797, 3003, 419, 2369, 633, 704, 3818, 2389, 241, 7804, 7805, # 6614 + 7806, 838, 3004, 3697, 2272, 2763, 2454, 3819, 1938, 2050, 3998, 1309, 3099, 2242, 1181, 7807, # 6630 + 1136, 2206, 3820, 2370, 1446, 4207, 2305, 4498, 7808, 7809, 4208, 1055, 2605, 484, 3698, 7810, # 6646 + 3999, 625, 4209, 2273, 3368, 1499, 4210, 4000, 7811, 4001, 4211, 3222, 2274, 2275, 3476, 7812, # 6662 + 7813, 2764, 808, 2606, 3699, 3369, 4002, 4212, 3100, 2532, 526, 3370, 3821, 4213, 955, 7814, # 6678 + 1620, 4214, 2637, 2427, 7815, 1429, 3700, 1669, 1831, 994, 928, 7816, 3578, 1260, 7817, 7818, # 6694 + 7819, 1948, 2288, 741, 2919, 1626, 4215, 2729, 2455, 867, 1184, 362, 3371, 1392, 7820, 7821, # 6710 + 4003, 4216, 1770, 1736, 3223, 2920, 4499, 4500, 1928, 2698, 1459, 1158, 7822, 3050, 3372, 2877, # 6726 + 1292, 1929, 2506, 2842, 3701, 1985, 1187, 2071, 2014, 2607, 4217, 7823, 
2566, 2507, 2169, 3702, # 6742 + 2483, 3299, 7824, 3703, 4501, 7825, 7826, 666, 1003, 3005, 1022, 3579, 4218, 7827, 4502, 1813, # 6758 + 2253, 574, 3822, 1603, 295, 1535, 705, 3823, 4219, 283, 858, 417, 7828, 7829, 3224, 4503, # 6774 + 4504, 3051, 1220, 1889, 1046, 2276, 2456, 4004, 1393, 1599, 689, 2567, 388, 4220, 7830, 2484, # 6790 + 802, 7831, 2798, 3824, 2060, 1405, 2254, 7832, 4505, 3825, 2109, 1052, 1345, 3225, 1585, 7833, # 6806 + 809, 7834, 7835, 7836, 575, 2730, 3477, 956, 1552, 1469, 1144, 2323, 7837, 2324, 1560, 2457, # 6822 + 3580, 3226, 4005, 616, 2207, 3155, 2180, 2289, 7838, 1832, 7839, 3478, 4506, 7840, 1319, 3704, # 6838 + 3705, 1211, 3581, 1023, 3227, 1293, 2799, 7841, 7842, 7843, 3826, 607, 2306, 3827, 762, 2878, # 6854 + 1439, 4221, 1360, 7844, 1485, 3052, 7845, 4507, 1038, 4222, 1450, 2061, 2638, 4223, 1379, 4508, # 6870 + 2585, 7846, 7847, 4224, 1352, 1414, 2325, 2921, 1172, 7848, 7849, 3828, 3829, 7850, 1797, 1451, # 6886 + 7851, 7852, 7853, 7854, 2922, 4006, 4007, 2485, 2346, 411, 4008, 4009, 3582, 3300, 3101, 4509, # 6902 + 1561, 2664, 1452, 4010, 1375, 7855, 7856, 47, 2959, 316, 7857, 1406, 1591, 2923, 3156, 7858, # 6918 + 1025, 2141, 3102, 3157, 354, 2731, 884, 2224, 4225, 2407, 508, 3706, 726, 3583, 996, 2428, # 6934 + 3584, 729, 7859, 392, 2191, 1453, 4011, 4510, 3707, 7860, 7861, 2458, 3585, 2608, 1675, 2800, # 6950 + 919, 2347, 2960, 2348, 1270, 4511, 4012, 73, 7862, 7863, 647, 7864, 3228, 2843, 2255, 1550, # 6966 + 1346, 3006, 7865, 1332, 883, 3479, 7866, 7867, 7868, 7869, 3301, 2765, 7870, 1212, 831, 1347, # 6982 + 4226, 4512, 2326, 3830, 1863, 3053, 720, 3831, 4513, 4514, 3832, 7871, 4227, 7872, 7873, 4515, # 6998 + 7874, 7875, 1798, 4516, 3708, 2609, 4517, 3586, 1645, 2371, 7876, 7877, 2924, 669, 2208, 2665, # 7014 + 2429, 7878, 2879, 7879, 7880, 1028, 3229, 7881, 4228, 2408, 7882, 2256, 1353, 7883, 7884, 4518, # 7030 + 3158, 518, 7885, 4013, 7886, 4229, 1960, 7887, 2142, 4230, 7888, 7889, 3007, 2349, 2350, 3833, # 7046 + 516, 1833, 1454, 4014, 2699, 4231, 4519, 2225, 2610, 1971, 1129, 3587, 7890, 2766, 7891, 2961, # 7062 + 1422, 577, 1470, 3008, 1524, 3373, 7892, 7893, 432, 4232, 3054, 3480, 7894, 2586, 1455, 2508, # 7078 + 2226, 1972, 1175, 7895, 1020, 2732, 4015, 3481, 4520, 7896, 2733, 7897, 1743, 1361, 3055, 3482, # 7094 + 2639, 4016, 4233, 4521, 2290, 895, 924, 4234, 2170, 331, 2243, 3056, 166, 1627, 3057, 1098, # 7110 + 7898, 1232, 2880, 2227, 3374, 4522, 657, 403, 1196, 2372, 542, 3709, 3375, 1600, 4235, 3483, # 7126 + 7899, 4523, 2767, 3230, 576, 530, 1362, 7900, 4524, 2533, 2666, 3710, 4017, 7901, 842, 3834, # 7142 + 7902, 2801, 2031, 1014, 4018, 213, 2700, 3376, 665, 621, 4236, 7903, 3711, 2925, 2430, 7904, # 7158 + 2431, 3302, 3588, 3377, 7905, 4237, 2534, 4238, 4525, 3589, 1682, 4239, 3484, 1380, 7906, 724, # 7174 + 2277, 600, 1670, 7907, 1337, 1233, 4526, 3103, 2244, 7908, 1621, 4527, 7909, 651, 4240, 7910, # 7190 + 1612, 4241, 2611, 7911, 2844, 7912, 2734, 2307, 3058, 7913, 716, 2459, 3059, 174, 1255, 2701, # 7206 + 4019, 3590, 548, 1320, 1398, 728, 4020, 1574, 7914, 1890, 1197, 3060, 4021, 7915, 3061, 3062, # 7222 + 3712, 3591, 3713, 747, 7916, 635, 4242, 4528, 7917, 7918, 7919, 4243, 7920, 7921, 4529, 7922, # 7238 + 3378, 4530, 2432, 451, 7923, 3714, 2535, 2072, 4244, 2735, 4245, 4022, 7924, 1764, 4531, 7925, # 7254 + 4246, 350, 7926, 2278, 2390, 2486, 7927, 4247, 4023, 2245, 1434, 4024, 488, 4532, 458, 4248, # 7270 + 4025, 3715, 771, 1330, 2391, 3835, 2568, 3159, 2159, 2409, 1553, 2667, 3160, 4249, 7928, 2487, # 7286 + 2881, 2612, 1720, 
2702, 4250, 3379, 4533, 7929, 2536, 4251, 7930, 3231, 4252, 2768, 7931, 2015, # 7302 + 2736, 7932, 1155, 1017, 3716, 3836, 7933, 3303, 2308, 201, 1864, 4253, 1430, 7934, 4026, 7935, # 7318 + 7936, 7937, 7938, 7939, 4254, 1604, 7940, 414, 1865, 371, 2587, 4534, 4535, 3485, 2016, 3104, # 7334 + 4536, 1708, 960, 4255, 887, 389, 2171, 1536, 1663, 1721, 7941, 2228, 4027, 2351, 2926, 1580, # 7350 + 7942, 7943, 7944, 1744, 7945, 2537, 4537, 4538, 7946, 4539, 7947, 2073, 7948, 7949, 3592, 3380, # 7366 + 2882, 4256, 7950, 4257, 2640, 3381, 2802, 673, 2703, 2460, 709, 3486, 4028, 3593, 4258, 7951, # 7382 + 1148, 502, 634, 7952, 7953, 1204, 4540, 3594, 1575, 4541, 2613, 3717, 7954, 3718, 3105, 948, # 7398 + 3232, 121, 1745, 3837, 1110, 7955, 4259, 3063, 2509, 3009, 4029, 3719, 1151, 1771, 3838, 1488, # 7414 + 4030, 1986, 7956, 2433, 3487, 7957, 7958, 2093, 7959, 4260, 3839, 1213, 1407, 2803, 531, 2737, # 7430 + 2538, 3233, 1011, 1537, 7960, 2769, 4261, 3106, 1061, 7961, 3720, 3721, 1866, 2883, 7962, 2017, # 7446 + 120, 4262, 4263, 2062, 3595, 3234, 2309, 3840, 2668, 3382, 1954, 4542, 7963, 7964, 3488, 1047, # 7462 + 2704, 1266, 7965, 1368, 4543, 2845, 649, 3383, 3841, 2539, 2738, 1102, 2846, 2669, 7966, 7967, # 7478 + 1999, 7968, 1111, 3596, 2962, 7969, 2488, 3842, 3597, 2804, 1854, 3384, 3722, 7970, 7971, 3385, # 7494 + 2410, 2884, 3304, 3235, 3598, 7972, 2569, 7973, 3599, 2805, 4031, 1460, 856, 7974, 3600, 7975, # 7510 + 2885, 2963, 7976, 2886, 3843, 7977, 4264, 632, 2510, 875, 3844, 1697, 3845, 2291, 7978, 7979, # 7526 + 4544, 3010, 1239, 580, 4545, 4265, 7980, 914, 936, 2074, 1190, 4032, 1039, 2123, 7981, 7982, # 7542 + 7983, 3386, 1473, 7984, 1354, 4266, 3846, 7985, 2172, 3064, 4033, 915, 3305, 4267, 4268, 3306, # 7558 + 1605, 1834, 7986, 2739, 398, 3601, 4269, 3847, 4034, 328, 1912, 2847, 4035, 3848, 1331, 4270, # 7574 + 3011, 937, 4271, 7987, 3602, 4036, 4037, 3387, 2160, 4546, 3388, 524, 742, 538, 3065, 1012, # 7590 + 7988, 7989, 3849, 2461, 7990, 658, 1103, 225, 3850, 7991, 7992, 4547, 7993, 4548, 7994, 3236, # 7606 + 1243, 7995, 4038, 963, 2246, 4549, 7996, 2705, 3603, 3161, 7997, 7998, 2588, 2327, 7999, 4550, # 7622 + 8000, 8001, 8002, 3489, 3307, 957, 3389, 2540, 2032, 1930, 2927, 2462, 870, 2018, 3604, 1746, # 7638 + 2770, 2771, 2434, 2463, 8003, 3851, 8004, 3723, 3107, 3724, 3490, 3390, 3725, 8005, 1179, 3066, # 7654 + 8006, 3162, 2373, 4272, 3726, 2541, 3163, 3108, 2740, 4039, 8007, 3391, 1556, 2542, 2292, 977, # 7670 + 2887, 2033, 4040, 1205, 3392, 8008, 1765, 3393, 3164, 2124, 1271, 1689, 714, 4551, 3491, 8009, # 7686 + 2328, 3852, 533, 4273, 3605, 2181, 617, 8010, 2464, 3308, 3492, 2310, 8011, 8012, 3165, 8013, # 7702 + 8014, 3853, 1987, 618, 427, 2641, 3493, 3394, 8015, 8016, 1244, 1690, 8017, 2806, 4274, 4552, # 7718 + 8018, 3494, 8019, 8020, 2279, 1576, 473, 3606, 4275, 3395, 972, 8021, 3607, 8022, 3067, 8023, # 7734 + 8024, 4553, 4554, 8025, 3727, 4041, 4042, 8026, 153, 4555, 356, 8027, 1891, 2888, 4276, 2143, # 7750 + 408, 803, 2352, 8028, 3854, 8029, 4277, 1646, 2570, 2511, 4556, 4557, 3855, 8030, 3856, 4278, # 7766 + 8031, 2411, 3396, 752, 8032, 8033, 1961, 2964, 8034, 746, 3012, 2465, 8035, 4279, 3728, 698, # 7782 + 4558, 1892, 4280, 3608, 2543, 4559, 3609, 3857, 8036, 3166, 3397, 8037, 1823, 1302, 4043, 2706, # 7798 + 3858, 1973, 4281, 8038, 4282, 3167, 823, 1303, 1288, 1236, 2848, 3495, 4044, 3398, 774, 3859, # 7814 + 8039, 1581, 4560, 1304, 2849, 3860, 4561, 8040, 2435, 2161, 1083, 3237, 4283, 4045, 4284, 344, # 7830 + 1173, 288, 2311, 454, 1683, 8041, 8042, 1461, 4562, 
4046, 2589, 8043, 8044, 4563, 985, 894, # 7846 + 8045, 3399, 3168, 8046, 1913, 2928, 3729, 1988, 8047, 2110, 1974, 8048, 4047, 8049, 2571, 1194, # 7862 + 425, 8050, 4564, 3169, 1245, 3730, 4285, 8051, 8052, 2850, 8053, 636, 4565, 1855, 3861, 760, # 7878 + 1799, 8054, 4286, 2209, 1508, 4566, 4048, 1893, 1684, 2293, 8055, 8056, 8057, 4287, 4288, 2210, # 7894 + 479, 8058, 8059, 832, 8060, 4049, 2489, 8061, 2965, 2490, 3731, 990, 3109, 627, 1814, 2642, # 7910 + 4289, 1582, 4290, 2125, 2111, 3496, 4567, 8062, 799, 4291, 3170, 8063, 4568, 2112, 1737, 3013, # 7926 + 1018, 543, 754, 4292, 3309, 1676, 4569, 4570, 4050, 8064, 1489, 8065, 3497, 8066, 2614, 2889, # 7942 + 4051, 8067, 8068, 2966, 8069, 8070, 8071, 8072, 3171, 4571, 4572, 2182, 1722, 8073, 3238, 3239, # 7958 + 1842, 3610, 1715, 481, 365, 1975, 1856, 8074, 8075, 1962, 2491, 4573, 8076, 2126, 3611, 3240, # 7974 + 433, 1894, 2063, 2075, 8077, 602, 2741, 8078, 8079, 8080, 8081, 8082, 3014, 1628, 3400, 8083, # 7990 + 3172, 4574, 4052, 2890, 4575, 2512, 8084, 2544, 2772, 8085, 8086, 8087, 3310, 4576, 2891, 8088, # 8006 + 4577, 8089, 2851, 4578, 4579, 1221, 2967, 4053, 2513, 8090, 8091, 8092, 1867, 1989, 8093, 8094, # 8022 + 8095, 1895, 8096, 8097, 4580, 1896, 4054, 318, 8098, 2094, 4055, 4293, 8099, 8100, 485, 8101, # 8038 + 938, 3862, 553, 2670, 116, 8102, 3863, 3612, 8103, 3498, 2671, 2773, 3401, 3311, 2807, 8104, # 8054 + 3613, 2929, 4056, 1747, 2930, 2968, 8105, 8106, 207, 8107, 8108, 2672, 4581, 2514, 8109, 3015, # 8070 + 890, 3614, 3864, 8110, 1877, 3732, 3402, 8111, 2183, 2353, 3403, 1652, 8112, 8113, 8114, 941, # 8086 + 2294, 208, 3499, 4057, 2019, 330, 4294, 3865, 2892, 2492, 3733, 4295, 8115, 8116, 8117, 8118, # 8102 ) - +# fmt: on diff --git a/env/Lib/site-packages/pip/_vendor/chardet/euctwprober.py b/env/Lib/site-packages/pip/_vendor/chardet/euctwprober.py index 35669cc4dd809fa4007ee67c752f1be991135e77..a37ab18995822ad6b3372d56366becdccf9a4c26 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/euctwprober.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/euctwprober.py @@ -25,22 +25,23 @@ # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -from .mbcharsetprober import MultiByteCharSetProber -from .codingstatemachine import CodingStateMachine from .chardistribution import EUCTWDistributionAnalysis +from .codingstatemachine import CodingStateMachine +from .mbcharsetprober import MultiByteCharSetProber from .mbcssm import EUCTW_SM_MODEL + class EUCTWProber(MultiByteCharSetProber): - def __init__(self): - super(EUCTWProber, self).__init__() + def __init__(self) -> None: + super().__init__() self.coding_sm = CodingStateMachine(EUCTW_SM_MODEL) self.distribution_analyzer = EUCTWDistributionAnalysis() self.reset() @property - def charset_name(self): + def charset_name(self) -> str: return "EUC-TW" @property - def language(self): + def language(self) -> str: return "Taiwan" diff --git a/env/Lib/site-packages/pip/_vendor/chardet/gb2312freq.py b/env/Lib/site-packages/pip/_vendor/chardet/gb2312freq.py index 697837bd9a87a77fc468fd1f10dd470d01701362..b32bfc74213d93d434f1f3a47cb5d7d0bf4863d3 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/gb2312freq.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/gb2312freq.py @@ -43,6 +43,7 @@ GB2312_TYPICAL_DISTRIBUTION_RATIO = 0.9 GB2312_TABLE_SIZE = 3760 +# fmt: off GB2312_CHAR_TO_FREQ_ORDER = ( 1671, 749,1443,2364,3924,3807,2330,3921,1704,3463,2691,1511,1515, 572,3191,2205, 2361, 224,2558, 479,1711, 963,3162, 440,4060,1905,2966,2947,3580,2647,3961,3842, @@ 
-280,4 +281,4 @@ GB2312_CHAR_TO_FREQ_ORDER = (
 381,1638,4592,1020, 516,3214, 458, 947,4575,1432, 211,1514,2926,1865,2142, 189,
 852,1221,1400,1486, 882,2299,4036, 351, 28,1122, 700,6479,6480,6481,6482,6483, #last 512
 )
-
+# fmt: on
diff --git a/env/Lib/site-packages/pip/_vendor/chardet/gb2312prober.py b/env/Lib/site-packages/pip/_vendor/chardet/gb2312prober.py
index 8446d2dd959721cc86d4ae5a7699197454f3aa91..d423e7311e2fbd9a014de808c107e96ad11c66e5 100644
--- a/env/Lib/site-packages/pip/_vendor/chardet/gb2312prober.py
+++ b/env/Lib/site-packages/pip/_vendor/chardet/gb2312prober.py
@@ -25,22 +25,23 @@
 # 02110-1301 USA
 ######################### END LICENSE BLOCK #########################
 
-from .mbcharsetprober import MultiByteCharSetProber
-from .codingstatemachine import CodingStateMachine
 from .chardistribution import GB2312DistributionAnalysis
+from .codingstatemachine import CodingStateMachine
+from .mbcharsetprober import MultiByteCharSetProber
 from .mbcssm import GB2312_SM_MODEL
 
+
 class GB2312Prober(MultiByteCharSetProber):
-    def __init__(self):
-        super(GB2312Prober, self).__init__()
+    def __init__(self) -> None:
+        super().__init__()
         self.coding_sm = CodingStateMachine(GB2312_SM_MODEL)
         self.distribution_analyzer = GB2312DistributionAnalysis()
         self.reset()
 
     @property
-    def charset_name(self):
+    def charset_name(self) -> str:
         return "GB2312"
 
     @property
-    def language(self):
+    def language(self) -> str:
         return "Chinese"
diff --git a/env/Lib/site-packages/pip/_vendor/chardet/hebrewprober.py b/env/Lib/site-packages/pip/_vendor/chardet/hebrewprober.py
index b0e1bf49268203d1f9d14cbe73753d95dc66c8a4..785d0057bcc0ea74a4b8d65ab7a0de78474bf892 100644
--- a/env/Lib/site-packages/pip/_vendor/chardet/hebrewprober.py
+++ b/env/Lib/site-packages/pip/_vendor/chardet/hebrewprober.py
@@ -25,8 +25,11 @@
 # 02110-1301 USA
 ######################### END LICENSE BLOCK #########################
 
+from typing import Optional, Union
+
 from .charsetprober import CharSetProber
 from .enums import ProbingState
+from .sbcharsetprober import SingleByteCharSetProber
 
 # This prober doesn't actually recognize a language or a charset.
 # It is a helper prober for the use of the Hebrew model probers
@@ -125,18 +128,20 @@ from .enums import ProbingState
 # model probers scores. The answer is returned in the form of the name of the
 # charset identified, either "windows-1255" or "ISO-8859-8".
 
+
 class HebrewProber(CharSetProber):
+    SPACE = 0x20
     # windows-1255 / ISO-8859-8 code points of interest
-    FINAL_KAF = 0xea
-    NORMAL_KAF = 0xeb
-    FINAL_MEM = 0xed
-    NORMAL_MEM = 0xee
-    FINAL_NUN = 0xef
-    NORMAL_NUN = 0xf0
-    FINAL_PE = 0xf3
-    NORMAL_PE = 0xf4
-    FINAL_TSADI = 0xf5
-    NORMAL_TSADI = 0xf6
+    FINAL_KAF = 0xEA
+    NORMAL_KAF = 0xEB
+    FINAL_MEM = 0xED
+    NORMAL_MEM = 0xEE
+    FINAL_NUN = 0xEF
+    NORMAL_NUN = 0xF0
+    FINAL_PE = 0xF3
+    NORMAL_PE = 0xF4
+    FINAL_TSADI = 0xF5
+    NORMAL_TSADI = 0xF6
 
     # Minimum Visual vs Logical final letter score difference.
# If the difference is below this, don't rely solely on the final letter score @@ -151,35 +156,44 @@ class HebrewProber(CharSetProber): VISUAL_HEBREW_NAME = "ISO-8859-8" LOGICAL_HEBREW_NAME = "windows-1255" - def __init__(self): - super(HebrewProber, self).__init__() - self._final_char_logical_score = None - self._final_char_visual_score = None - self._prev = None - self._before_prev = None - self._logical_prober = None - self._visual_prober = None + def __init__(self) -> None: + super().__init__() + self._final_char_logical_score = 0 + self._final_char_visual_score = 0 + self._prev = self.SPACE + self._before_prev = self.SPACE + self._logical_prober: Optional[SingleByteCharSetProber] = None + self._visual_prober: Optional[SingleByteCharSetProber] = None self.reset() - def reset(self): + def reset(self) -> None: self._final_char_logical_score = 0 self._final_char_visual_score = 0 # The two last characters seen in the previous buffer, # mPrev and mBeforePrev are initialized to space in order to simulate # a word delimiter at the beginning of the data - self._prev = ' ' - self._before_prev = ' ' + self._prev = self.SPACE + self._before_prev = self.SPACE # These probers are owned by the group prober. - def set_model_probers(self, logicalProber, visualProber): - self._logical_prober = logicalProber - self._visual_prober = visualProber + def set_model_probers( + self, + logical_prober: SingleByteCharSetProber, + visual_prober: SingleByteCharSetProber, + ) -> None: + self._logical_prober = logical_prober + self._visual_prober = visual_prober - def is_final(self, c): - return c in [self.FINAL_KAF, self.FINAL_MEM, self.FINAL_NUN, - self.FINAL_PE, self.FINAL_TSADI] + def is_final(self, c: int) -> bool: + return c in [ + self.FINAL_KAF, + self.FINAL_MEM, + self.FINAL_NUN, + self.FINAL_PE, + self.FINAL_TSADI, + ] - def is_non_final(self, c): + def is_non_final(self, c: int) -> bool: # The normal Tsadi is not a good Non-Final letter due to words like # 'lechotet' (to chat) containing an apostrophe after the tsadi. This # apostrophe is converted to a space in FilterWithoutEnglishLetters @@ -190,10 +204,9 @@ class HebrewProber(CharSetProber): # for example legally end with a Non-Final Pe or Kaf. However, the # benefit of these letters as Non-Final letters outweighs the damage # since these words are quite rare. - return c in [self.NORMAL_KAF, self.NORMAL_MEM, - self.NORMAL_NUN, self.NORMAL_PE] + return c in [self.NORMAL_KAF, self.NORMAL_MEM, self.NORMAL_NUN, self.NORMAL_PE] - def feed(self, byte_str): + def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState: # Final letter analysis for logical-visual decision. # Look for evidence that the received buffer is either logical Hebrew # or visual Hebrew. 
@@ -227,9 +240,9 @@ class HebrewProber(CharSetProber): byte_str = self.filter_high_byte_only(byte_str) for cur in byte_str: - if cur == ' ': + if cur == self.SPACE: # We stand on a space - a word just ended - if self._before_prev != ' ': + if self._before_prev != self.SPACE: # next-to-last char was not a space so self._prev is not a # 1 letter word if self.is_final(self._prev): @@ -241,8 +254,11 @@ class HebrewProber(CharSetProber): self._final_char_visual_score += 1 else: # Not standing on a space - if ((self._before_prev == ' ') and - (self.is_final(self._prev)) and (cur != ' ')): + if ( + (self._before_prev == self.SPACE) + and (self.is_final(self._prev)) + and (cur != self.SPACE) + ): # case (3) [-2:space][-1:final letter][cur:not space] self._final_char_visual_score += 1 self._before_prev = self._prev @@ -253,7 +269,10 @@ class HebrewProber(CharSetProber): return ProbingState.DETECTING @property - def charset_name(self): + def charset_name(self) -> str: + assert self._logical_prober is not None + assert self._visual_prober is not None + # Make the decision: is it Logical or Visual? # If the final letter score distance is dominant enough, rely on it. finalsub = self._final_char_logical_score - self._final_char_visual_score @@ -263,8 +282,9 @@ class HebrewProber(CharSetProber): return self.VISUAL_HEBREW_NAME # It's not dominant enough, try to rely on the model scores instead. - modelsub = (self._logical_prober.get_confidence() - - self._visual_prober.get_confidence()) + modelsub = ( + self._logical_prober.get_confidence() - self._visual_prober.get_confidence() + ) if modelsub > self.MIN_MODEL_DISTANCE: return self.LOGICAL_HEBREW_NAME if modelsub < -self.MIN_MODEL_DISTANCE: @@ -280,13 +300,17 @@ class HebrewProber(CharSetProber): return self.LOGICAL_HEBREW_NAME @property - def language(self): - return 'Hebrew' + def language(self) -> str: + return "Hebrew" @property - def state(self): + def state(self) -> ProbingState: + assert self._logical_prober is not None + assert self._visual_prober is not None + # Remain active as long as any of the model probers are active. 
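# Aside, for illustration (hypothetical names, not from this diff): the
# `assert ... is not None` lines this hunk adds are the standard idiom for
# narrowing an Optional attribute so type checkers accept the access:

from typing import Optional

class Example:
    def __init__(self) -> None:
        self.helper: Optional[str] = None  # wired up later by an owner object

    def describe(self) -> str:
        assert self.helper is not None     # narrows Optional[str] to str
        return self.helper.upper()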
- if (self._logical_prober.state == ProbingState.NOT_ME) and \ - (self._visual_prober.state == ProbingState.NOT_ME): + if (self._logical_prober.state == ProbingState.NOT_ME) and ( + self._visual_prober.state == ProbingState.NOT_ME + ): return ProbingState.NOT_ME return ProbingState.DETECTING diff --git a/env/Lib/site-packages/pip/_vendor/chardet/jisfreq.py b/env/Lib/site-packages/pip/_vendor/chardet/jisfreq.py index 83fc082b545106d02622de20f2083e8a7562f96c..3293576e012a1c931b5e89ebc065c67b65941084 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/jisfreq.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/jisfreq.py @@ -46,6 +46,7 @@ JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0 # Char to FreqOrder table , JIS_TABLE_SIZE = 4368 +# fmt: off JIS_CHAR_TO_FREQ_ORDER = ( 40, 1, 6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510, # 16 3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247, 18, 179,5071, 856,1661, # 32 @@ -321,5 +322,4 @@ JIS_CHAR_TO_FREQ_ORDER = ( 1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767, # 4352 2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137, # 4368 #last 512 ) - - +# fmt: on diff --git a/env/Lib/site-packages/pip/_vendor/chardet/jpcntx.py b/env/Lib/site-packages/pip/_vendor/chardet/jpcntx.py index 20044e4bc8f5a9775d82be0ecf387ce752732507..2f53bdda09e92da38e31cac1a6d415f4670137f7 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/jpcntx.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/jpcntx.py @@ -25,110 +25,114 @@ # 02110-1301 USA ######################### END LICENSE BLOCK ######################### +from typing import List, Tuple, Union # This is hiragana 2-char sequence table, the number in each cell represents its frequency category -jp2CharContext = ( -(0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1), -(2,4,0,4,0,3,0,4,0,3,4,4,4,2,4,3,3,4,3,2,3,3,4,2,3,3,3,2,4,1,4,3,3,1,5,4,3,4,3,4,3,5,3,0,3,5,4,2,0,3,1,0,3,3,0,3,3,0,1,1,0,4,3,0,3,3,0,4,0,2,0,3,5,5,5,5,4,0,4,1,0,3,4), -(0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2), -(0,4,0,5,0,5,0,4,0,4,5,4,4,3,5,3,5,1,5,3,4,3,4,4,3,4,3,3,4,3,5,4,4,3,5,5,3,5,5,5,3,5,5,3,4,5,5,3,1,3,2,0,3,4,0,4,2,0,4,2,1,5,3,2,3,5,0,4,0,2,0,5,4,4,5,4,5,0,4,0,0,4,4), -(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), -(0,3,0,4,0,3,0,3,0,4,5,4,3,3,3,3,4,3,5,4,4,3,5,4,4,3,4,3,4,4,4,4,5,3,4,4,3,4,5,5,4,5,5,1,4,5,4,3,0,3,3,1,3,3,0,4,4,0,3,3,1,5,3,3,3,5,0,4,0,3,0,4,4,3,4,3,3,0,4,1,1,3,4), -(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), -(0,4,0,3,0,3,0,4,0,3,4,4,3,2,2,1,2,1,3,1,3,3,3,3,3,4,3,1,3,3,5,3,3,0,4,3,0,5,4,3,3,5,4,4,3,4,4,5,0,1,2,0,1,2,0,2,2,0,1,0,0,5,2,2,1,4,0,3,0,1,0,4,4,3,5,4,3,0,2,1,0,4,3), -(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), -(0,3,0,5,0,4,0,2,1,4,4,2,4,1,4,2,4,2,4,3,3,3,4,3,3,3,3,1,4,2,3,3,3,1,4,4,1,1,1,4,3,3,2,0,2,4,3,2,0,3,3,0,3,1,1,0,0,0,3,3,0,4,2,2,3,4,0,4,0,3,0,4,4,5,3,4,4,0,3,0,0,1,4), 
-(1,4,0,4,0,4,0,4,0,3,5,4,4,3,4,3,5,4,3,3,4,3,5,4,4,4,4,3,4,2,4,3,3,1,5,4,3,2,4,5,4,5,5,4,4,5,4,4,0,3,2,2,3,3,0,4,3,1,3,2,1,4,3,3,4,5,0,3,0,2,0,4,5,5,4,5,4,0,4,0,0,5,4), -(0,5,0,5,0,4,0,3,0,4,4,3,4,3,3,3,4,0,4,4,4,3,4,3,4,3,3,1,4,2,4,3,4,0,5,4,1,4,5,4,4,5,3,2,4,3,4,3,2,4,1,3,3,3,2,3,2,0,4,3,3,4,3,3,3,4,0,4,0,3,0,4,5,4,4,4,3,0,4,1,0,1,3), -(0,3,1,4,0,3,0,2,0,3,4,4,3,1,4,2,3,3,4,3,4,3,4,3,4,4,3,2,3,1,5,4,4,1,4,4,3,5,4,4,3,5,5,4,3,4,4,3,1,2,3,1,2,2,0,3,2,0,3,1,0,5,3,3,3,4,3,3,3,3,4,4,4,4,5,4,2,0,3,3,2,4,3), -(0,2,0,3,0,1,0,1,0,0,3,2,0,0,2,0,1,0,2,1,3,3,3,1,2,3,1,0,1,0,4,2,1,1,3,3,0,4,3,3,1,4,3,3,0,3,3,2,0,0,0,0,1,0,0,2,0,0,0,0,0,4,1,0,2,3,2,2,2,1,3,3,3,4,4,3,2,0,3,1,0,3,3), -(0,4,0,4,0,3,0,3,0,4,4,4,3,3,3,3,3,3,4,3,4,2,4,3,4,3,3,2,4,3,4,5,4,1,4,5,3,5,4,5,3,5,4,0,3,5,5,3,1,3,3,2,2,3,0,3,4,1,3,3,2,4,3,3,3,4,0,4,0,3,0,4,5,4,4,5,3,0,4,1,0,3,4), -(0,2,0,3,0,3,0,0,0,2,2,2,1,0,1,0,0,0,3,0,3,0,3,0,1,3,1,0,3,1,3,3,3,1,3,3,3,0,1,3,1,3,4,0,0,3,1,1,0,3,2,0,0,0,0,1,3,0,1,0,0,3,3,2,0,3,0,0,0,0,0,3,4,3,4,3,3,0,3,0,0,2,3), -(2,3,0,3,0,2,0,1,0,3,3,4,3,1,3,1,1,1,3,1,4,3,4,3,3,3,0,0,3,1,5,4,3,1,4,3,2,5,5,4,4,4,4,3,3,4,4,4,0,2,1,1,3,2,0,1,2,0,0,1,0,4,1,3,3,3,0,3,0,1,0,4,4,4,5,5,3,0,2,0,0,4,4), -(0,2,0,1,0,3,1,3,0,2,3,3,3,0,3,1,0,0,3,0,3,2,3,1,3,2,1,1,0,0,4,2,1,0,2,3,1,4,3,2,0,4,4,3,1,3,1,3,0,1,0,0,1,0,0,0,1,0,0,0,0,4,1,1,1,2,0,3,0,0,0,3,4,2,4,3,2,0,1,0,0,3,3), -(0,1,0,4,0,5,0,4,0,2,4,4,2,3,3,2,3,3,5,3,3,3,4,3,4,2,3,0,4,3,3,3,4,1,4,3,2,1,5,5,3,4,5,1,3,5,4,2,0,3,3,0,1,3,0,4,2,0,1,3,1,4,3,3,3,3,0,3,0,1,0,3,4,4,4,5,5,0,3,0,1,4,5), -(0,2,0,3,0,3,0,0,0,2,3,1,3,0,4,0,1,1,3,0,3,4,3,2,3,1,0,3,3,2,3,1,3,0,2,3,0,2,1,4,1,2,2,0,0,3,3,0,0,2,0,0,0,1,0,0,0,0,2,2,0,3,2,1,3,3,0,2,0,2,0,0,3,3,1,2,4,0,3,0,2,2,3), -(2,4,0,5,0,4,0,4,0,2,4,4,4,3,4,3,3,3,1,2,4,3,4,3,4,4,5,0,3,3,3,3,2,0,4,3,1,4,3,4,1,4,4,3,3,4,4,3,1,2,3,0,4,2,0,4,1,0,3,3,0,4,3,3,3,4,0,4,0,2,0,3,5,3,4,5,2,0,3,0,0,4,5), -(0,3,0,4,0,1,0,1,0,1,3,2,2,1,3,0,3,0,2,0,2,0,3,0,2,0,0,0,1,0,1,1,0,0,3,1,0,0,0,4,0,3,1,0,2,1,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,4,2,2,3,1,0,3,0,0,0,1,4,4,4,3,0,0,4,0,0,1,4), -(1,4,1,5,0,3,0,3,0,4,5,4,4,3,5,3,3,4,4,3,4,1,3,3,3,3,2,1,4,1,5,4,3,1,4,4,3,5,4,4,3,5,4,3,3,4,4,4,0,3,3,1,2,3,0,3,1,0,3,3,0,5,4,4,4,4,4,4,3,3,5,4,4,3,3,5,4,0,3,2,0,4,4), -(0,2,0,3,0,1,0,0,0,1,3,3,3,2,4,1,3,0,3,1,3,0,2,2,1,1,0,0,2,0,4,3,1,0,4,3,0,4,4,4,1,4,3,1,1,3,3,1,0,2,0,0,1,3,0,0,0,0,2,0,0,4,3,2,4,3,5,4,3,3,3,4,3,3,4,3,3,0,2,1,0,3,3), -(0,2,0,4,0,3,0,2,0,2,5,5,3,4,4,4,4,1,4,3,3,0,4,3,4,3,1,3,3,2,4,3,0,3,4,3,0,3,4,4,2,4,4,0,4,5,3,3,2,2,1,1,1,2,0,1,5,0,3,3,2,4,3,3,3,4,0,3,0,2,0,4,4,3,5,5,0,0,3,0,2,3,3), -(0,3,0,4,0,3,0,1,0,3,4,3,3,1,3,3,3,0,3,1,3,0,4,3,3,1,1,0,3,0,3,3,0,0,4,4,0,1,5,4,3,3,5,0,3,3,4,3,0,2,0,1,1,1,0,1,3,0,1,2,1,3,3,2,3,3,0,3,0,1,0,1,3,3,4,4,1,0,1,2,2,1,3), -(0,1,0,4,0,4,0,3,0,1,3,3,3,2,3,1,1,0,3,0,3,3,4,3,2,4,2,0,1,0,4,3,2,0,4,3,0,5,3,3,2,4,4,4,3,3,3,4,0,1,3,0,0,1,0,0,1,0,0,0,0,4,2,3,3,3,0,3,0,0,0,4,4,4,5,3,2,0,3,3,0,3,5), -(0,2,0,3,0,0,0,3,0,1,3,0,2,0,0,0,1,0,3,1,1,3,3,0,0,3,0,0,3,0,2,3,1,0,3,1,0,3,3,2,0,4,2,2,0,2,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,2,1,2,0,1,0,1,0,0,0,1,3,1,2,0,0,0,1,0,0,1,4), -(0,3,0,3,0,5,0,1,0,2,4,3,1,3,3,2,1,1,5,2,1,0,5,1,2,0,0,0,3,3,2,2,3,2,4,3,0,0,3,3,1,3,3,0,2,5,3,4,0,3,3,0,1,2,0,2,2,0,3,2,0,2,2,3,3,3,0,2,0,1,0,3,4,4,2,5,4,0,3,0,0,3,5), -(0,3,0,3,0,3,0,1,0,3,3,3,3,0,3,0,2,0,2,1,1,0,2,0,1,0,0,0,2,1,0,0,1,0,3,2,0,0,3,3,1,2,3,1,0,3,3,0,0,1,0,0,0,0,0,2,0,0,0,0,0,2,3,1,2,3,0,3,0,1,0,3,2,1,0,4,3,0,1,1,0,3,3), 
-(0,4,0,5,0,3,0,3,0,4,5,5,4,3,5,3,4,3,5,3,3,2,5,3,4,4,4,3,4,3,4,5,5,3,4,4,3,4,4,5,4,4,4,3,4,5,5,4,2,3,4,2,3,4,0,3,3,1,4,3,2,4,3,3,5,5,0,3,0,3,0,5,5,5,5,4,4,0,4,0,1,4,4), -(0,4,0,4,0,3,0,3,0,3,5,4,4,2,3,2,5,1,3,2,5,1,4,2,3,2,3,3,4,3,3,3,3,2,5,4,1,3,3,5,3,4,4,0,4,4,3,1,1,3,1,0,2,3,0,2,3,0,3,0,0,4,3,1,3,4,0,3,0,2,0,4,4,4,3,4,5,0,4,0,0,3,4), -(0,3,0,3,0,3,1,2,0,3,4,4,3,3,3,0,2,2,4,3,3,1,3,3,3,1,1,0,3,1,4,3,2,3,4,4,2,4,4,4,3,4,4,3,2,4,4,3,1,3,3,1,3,3,0,4,1,0,2,2,1,4,3,2,3,3,5,4,3,3,5,4,4,3,3,0,4,0,3,2,2,4,4), -(0,2,0,1,0,0,0,0,0,1,2,1,3,0,0,0,0,0,2,0,1,2,1,0,0,1,0,0,0,0,3,0,0,1,0,1,1,3,1,0,0,0,1,1,0,1,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,1,2,2,0,3,4,0,0,0,1,1,0,0,1,0,0,0,0,0,1,1), -(0,1,0,0,0,1,0,0,0,0,4,0,4,1,4,0,3,0,4,0,3,0,4,0,3,0,3,0,4,1,5,1,4,0,0,3,0,5,0,5,2,0,1,0,0,0,2,1,4,0,1,3,0,0,3,0,0,3,1,1,4,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0), -(1,4,0,5,0,3,0,2,0,3,5,4,4,3,4,3,5,3,4,3,3,0,4,3,3,3,3,3,3,2,4,4,3,1,3,4,4,5,4,4,3,4,4,1,3,5,4,3,3,3,1,2,2,3,3,1,3,1,3,3,3,5,3,3,4,5,0,3,0,3,0,3,4,3,4,4,3,0,3,0,2,4,3), -(0,1,0,4,0,0,0,0,0,1,4,0,4,1,4,2,4,0,3,0,1,0,1,0,0,0,0,0,2,0,3,1,1,1,0,3,0,0,0,1,2,1,0,0,1,1,1,1,0,1,0,0,0,1,0,0,3,0,0,0,0,3,2,0,2,2,0,1,0,0,0,2,3,2,3,3,0,0,0,0,2,1,0), -(0,5,1,5,0,3,0,3,0,5,4,4,5,1,5,3,3,0,4,3,4,3,5,3,4,3,3,2,4,3,4,3,3,0,3,3,1,4,4,3,4,4,4,3,4,5,5,3,2,3,1,1,3,3,1,3,1,1,3,3,2,4,5,3,3,5,0,4,0,3,0,4,4,3,5,3,3,0,3,4,0,4,3), -(0,5,0,5,0,3,0,2,0,4,4,3,5,2,4,3,3,3,4,4,4,3,5,3,5,3,3,1,4,0,4,3,3,0,3,3,0,4,4,4,4,5,4,3,3,5,5,3,2,3,1,2,3,2,0,1,0,0,3,2,2,4,4,3,1,5,0,4,0,3,0,4,3,1,3,2,1,0,3,3,0,3,3), -(0,4,0,5,0,5,0,4,0,4,5,5,5,3,4,3,3,2,5,4,4,3,5,3,5,3,4,0,4,3,4,4,3,2,4,4,3,4,5,4,4,5,5,0,3,5,5,4,1,3,3,2,3,3,1,3,1,0,4,3,1,4,4,3,4,5,0,4,0,2,0,4,3,4,4,3,3,0,4,0,0,5,5), -(0,4,0,4,0,5,0,1,1,3,3,4,4,3,4,1,3,0,5,1,3,0,3,1,3,1,1,0,3,0,3,3,4,0,4,3,0,4,4,4,3,4,4,0,3,5,4,1,0,3,0,0,2,3,0,3,1,0,3,1,0,3,2,1,3,5,0,3,0,1,0,3,2,3,3,4,4,0,2,2,0,4,4), -(2,4,0,5,0,4,0,3,0,4,5,5,4,3,5,3,5,3,5,3,5,2,5,3,4,3,3,4,3,4,5,3,2,1,5,4,3,2,3,4,5,3,4,1,2,5,4,3,0,3,3,0,3,2,0,2,3,0,4,1,0,3,4,3,3,5,0,3,0,1,0,4,5,5,5,4,3,0,4,2,0,3,5), -(0,5,0,4,0,4,0,2,0,5,4,3,4,3,4,3,3,3,4,3,4,2,5,3,5,3,4,1,4,3,4,4,4,0,3,5,0,4,4,4,4,5,3,1,3,4,5,3,3,3,3,3,3,3,0,2,2,0,3,3,2,4,3,3,3,5,3,4,1,3,3,5,3,2,0,0,0,0,4,3,1,3,3), -(0,1,0,3,0,3,0,1,0,1,3,3,3,2,3,3,3,0,3,0,0,0,3,1,3,0,0,0,2,2,2,3,0,0,3,2,0,1,2,4,1,3,3,0,0,3,3,3,0,1,0,0,2,1,0,0,3,0,3,1,0,3,0,0,1,3,0,2,0,1,0,3,3,1,3,3,0,0,1,1,0,3,3), -(0,2,0,3,0,2,1,4,0,2,2,3,1,1,3,1,1,0,2,0,3,1,2,3,1,3,0,0,1,0,4,3,2,3,3,3,1,4,2,3,3,3,3,1,0,3,1,4,0,1,1,0,1,2,0,1,1,0,1,1,0,3,1,3,2,2,0,1,0,0,0,2,3,3,3,1,0,0,0,0,0,2,3), -(0,5,0,4,0,5,0,2,0,4,5,5,3,3,4,3,3,1,5,4,4,2,4,4,4,3,4,2,4,3,5,5,4,3,3,4,3,3,5,5,4,5,5,1,3,4,5,3,1,4,3,1,3,3,0,3,3,1,4,3,1,4,5,3,3,5,0,4,0,3,0,5,3,3,1,4,3,0,4,0,1,5,3), -(0,5,0,5,0,4,0,2,0,4,4,3,4,3,3,3,3,3,5,4,4,4,4,4,4,5,3,3,5,2,4,4,4,3,4,4,3,3,4,4,5,5,3,3,4,3,4,3,3,4,3,3,3,3,1,2,2,1,4,3,3,5,4,4,3,4,0,4,0,3,0,4,4,4,4,4,1,0,4,2,0,2,4), -(0,4,0,4,0,3,0,1,0,3,5,2,3,0,3,0,2,1,4,2,3,3,4,1,4,3,3,2,4,1,3,3,3,0,3,3,0,0,3,3,3,5,3,3,3,3,3,2,0,2,0,0,2,0,0,2,0,0,1,0,0,3,1,2,2,3,0,3,0,2,0,4,4,3,3,4,1,0,3,0,0,2,4), -(0,0,0,4,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,1,0,2,0,1,0,0,0,0,0,3,1,3,0,3,2,0,0,0,1,0,3,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,0,2,0,0,0,0,0,0,2), -(0,2,1,3,0,2,0,2,0,3,3,3,3,1,3,1,3,3,3,3,3,3,4,2,2,1,2,1,4,0,4,3,1,3,3,3,2,4,3,5,4,3,3,3,3,3,3,3,0,1,3,0,2,0,0,1,0,0,1,0,0,4,2,0,2,3,0,3,3,0,3,3,4,2,3,1,4,0,1,2,0,2,3), 
-(0,3,0,3,0,1,0,3,0,2,3,3,3,0,3,1,2,0,3,3,2,3,3,2,3,2,3,1,3,0,4,3,2,0,3,3,1,4,3,3,2,3,4,3,1,3,3,1,1,0,1,1,0,1,0,1,0,1,0,0,0,4,1,1,0,3,0,3,1,0,2,3,3,3,3,3,1,0,0,2,0,3,3), -(0,0,0,0,0,0,0,0,0,0,3,0,2,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,3,0,3,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,2,0,2,3,0,0,0,0,0,0,0,0,3), -(0,2,0,3,1,3,0,3,0,2,3,3,3,1,3,1,3,1,3,1,3,3,3,1,3,0,2,3,1,1,4,3,3,2,3,3,1,2,2,4,1,3,3,0,1,4,2,3,0,1,3,0,3,0,0,1,3,0,2,0,0,3,3,2,1,3,0,3,0,2,0,3,4,4,4,3,1,0,3,0,0,3,3), -(0,2,0,1,0,2,0,0,0,1,3,2,2,1,3,0,1,1,3,0,3,2,3,1,2,0,2,0,1,1,3,3,3,0,3,3,1,1,2,3,2,3,3,1,2,3,2,0,0,1,0,0,0,0,0,0,3,0,1,0,0,2,1,2,1,3,0,3,0,0,0,3,4,4,4,3,2,0,2,0,0,2,4), -(0,0,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,3,1,0,0,0,0,0,0,0,3), -(0,3,0,3,0,2,0,3,0,3,3,3,2,3,2,2,2,0,3,1,3,3,3,2,3,3,0,0,3,0,3,2,2,0,2,3,1,4,3,4,3,3,2,3,1,5,4,4,0,3,1,2,1,3,0,3,1,1,2,0,2,3,1,3,1,3,0,3,0,1,0,3,3,4,4,2,1,0,2,1,0,2,4), -(0,1,0,3,0,1,0,2,0,1,4,2,5,1,4,0,2,0,2,1,3,1,4,0,2,1,0,0,2,1,4,1,1,0,3,3,0,5,1,3,2,3,3,1,0,3,2,3,0,1,0,0,0,0,0,0,1,0,0,0,0,4,0,1,0,3,0,2,0,1,0,3,3,3,4,3,3,0,0,0,0,2,3), -(0,0,0,1,0,0,0,0,0,0,2,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,1,0,0,0,0,0,3), -(0,1,0,3,0,4,0,3,0,2,4,3,1,0,3,2,2,1,3,1,2,2,3,1,1,1,2,1,3,0,1,2,0,1,3,2,1,3,0,5,5,1,0,0,1,3,2,1,0,3,0,0,1,0,0,0,0,0,3,4,0,1,1,1,3,2,0,2,0,1,0,2,3,3,1,2,3,0,1,0,1,0,4), -(0,0,0,1,0,3,0,3,0,2,2,1,0,0,4,0,3,0,3,1,3,0,3,0,3,0,1,0,3,0,3,1,3,0,3,3,0,0,1,2,1,1,1,0,1,2,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,2,2,1,2,0,0,2,0,0,0,0,2,3,3,3,3,0,0,0,0,1,4), -(0,0,0,3,0,3,0,0,0,0,3,1,1,0,3,0,1,0,2,0,1,0,0,0,0,0,0,0,1,0,3,0,2,0,2,3,0,0,2,2,3,1,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,2,3), -(2,4,0,5,0,5,0,4,0,3,4,3,3,3,4,3,3,3,4,3,4,4,5,4,5,5,5,2,3,0,5,5,4,1,5,4,3,1,5,4,3,4,4,3,3,4,3,3,0,3,2,0,2,3,0,3,0,0,3,3,0,5,3,2,3,3,0,3,0,3,0,3,4,5,4,5,3,0,4,3,0,3,4), -(0,3,0,3,0,3,0,3,0,3,3,4,3,2,3,2,3,0,4,3,3,3,3,3,3,3,3,0,3,2,4,3,3,1,3,4,3,4,4,4,3,4,4,3,2,4,4,1,0,2,0,0,1,1,0,2,0,0,3,1,0,5,3,2,1,3,0,3,0,1,2,4,3,2,4,3,3,0,3,2,0,4,4), -(0,3,0,3,0,1,0,0,0,1,4,3,3,2,3,1,3,1,4,2,3,2,4,2,3,4,3,0,2,2,3,3,3,0,3,3,3,0,3,4,1,3,3,0,3,4,3,3,0,1,1,0,1,0,0,0,4,0,3,0,0,3,1,2,1,3,0,4,0,1,0,4,3,3,4,3,3,0,2,0,0,3,3), -(0,3,0,4,0,1,0,3,0,3,4,3,3,0,3,3,3,1,3,1,3,3,4,3,3,3,0,0,3,1,5,3,3,1,3,3,2,5,4,3,3,4,5,3,2,5,3,4,0,1,0,0,0,0,0,2,0,0,1,1,0,4,2,2,1,3,0,3,0,2,0,4,4,3,5,3,2,0,1,1,0,3,4), -(0,5,0,4,0,5,0,2,0,4,4,3,3,2,3,3,3,1,4,3,4,1,5,3,4,3,4,0,4,2,4,3,4,1,5,4,0,4,4,4,4,5,4,1,3,5,4,2,1,4,1,1,3,2,0,3,1,0,3,2,1,4,3,3,3,4,0,4,0,3,0,4,4,4,3,3,3,0,4,2,0,3,4), -(1,4,0,4,0,3,0,1,0,3,3,3,1,1,3,3,2,2,3,3,1,0,3,2,2,1,2,0,3,1,2,1,2,0,3,2,0,2,2,3,3,4,3,0,3,3,1,2,0,1,1,3,1,2,0,0,3,0,1,1,0,3,2,2,3,3,0,3,0,0,0,2,3,3,4,3,3,0,1,0,0,1,4), -(0,4,0,4,0,4,0,0,0,3,4,4,3,1,4,2,3,2,3,3,3,1,4,3,4,0,3,0,4,2,3,3,2,2,5,4,2,1,3,4,3,4,3,1,3,3,4,2,0,2,1,0,3,3,0,0,2,0,3,1,0,4,4,3,4,3,0,4,0,1,0,2,4,4,4,4,4,0,3,2,0,3,3), -(0,0,0,1,0,4,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,3,2,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2), -(0,2,0,3,0,4,0,4,0,1,3,3,3,0,4,0,2,1,2,1,1,1,2,0,3,1,1,0,1,0,3,1,0,0,3,3,2,0,1,1,0,0,0,0,0,1,0,2,0,2,2,0,3,1,0,0,1,0,1,1,0,1,2,0,3,0,0,0,0,1,0,0,3,3,4,3,1,0,1,0,3,0,2), 
-(0,0,0,3,0,5,0,0,0,0,1,0,2,0,3,1,0,1,3,0,0,0,2,0,0,0,1,0,0,0,1,1,0,0,4,0,0,0,2,3,0,1,4,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,1,0,0,0,0,0,0,0,2,0,0,3,0,0,0,0,0,3), -(0,2,0,5,0,5,0,1,0,2,4,3,3,2,5,1,3,2,3,3,3,0,4,1,2,0,3,0,4,0,2,2,1,1,5,3,0,0,1,4,2,3,2,0,3,3,3,2,0,2,4,1,1,2,0,1,1,0,3,1,0,1,3,1,2,3,0,2,0,0,0,1,3,5,4,4,4,0,3,0,0,1,3), -(0,4,0,5,0,4,0,4,0,4,5,4,3,3,4,3,3,3,4,3,4,4,5,3,4,5,4,2,4,2,3,4,3,1,4,4,1,3,5,4,4,5,5,4,4,5,5,5,2,3,3,1,4,3,1,3,3,0,3,3,1,4,3,4,4,4,0,3,0,4,0,3,3,4,4,5,0,0,4,3,0,4,5), -(0,4,0,4,0,3,0,3,0,3,4,4,4,3,3,2,4,3,4,3,4,3,5,3,4,3,2,1,4,2,4,4,3,1,3,4,2,4,5,5,3,4,5,4,1,5,4,3,0,3,2,2,3,2,1,3,1,0,3,3,3,5,3,3,3,5,4,4,2,3,3,4,3,3,3,2,1,0,3,2,1,4,3), -(0,4,0,5,0,4,0,3,0,3,5,5,3,2,4,3,4,0,5,4,4,1,4,4,4,3,3,3,4,3,5,5,2,3,3,4,1,2,5,5,3,5,5,2,3,5,5,4,0,3,2,0,3,3,1,1,5,1,4,1,0,4,3,2,3,5,0,4,0,3,0,5,4,3,4,3,0,0,4,1,0,4,4), -(1,3,0,4,0,2,0,2,0,2,5,5,3,3,3,3,3,0,4,2,3,4,4,4,3,4,0,0,3,4,5,4,3,3,3,3,2,5,5,4,5,5,5,4,3,5,5,5,1,3,1,0,1,0,0,3,2,0,4,2,0,5,2,3,2,4,1,3,0,3,0,4,5,4,5,4,3,0,4,2,0,5,4), -(0,3,0,4,0,5,0,3,0,3,4,4,3,2,3,2,3,3,3,3,3,2,4,3,3,2,2,0,3,3,3,3,3,1,3,3,3,0,4,4,3,4,4,1,1,4,4,2,0,3,1,0,1,1,0,4,1,0,2,3,1,3,3,1,3,4,0,3,0,1,0,3,1,3,0,0,1,0,2,0,0,4,4), -(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), -(0,3,0,3,0,2,0,3,0,1,5,4,3,3,3,1,4,2,1,2,3,4,4,2,4,4,5,0,3,1,4,3,4,0,4,3,3,3,2,3,2,5,3,4,3,2,2,3,0,0,3,0,2,1,0,1,2,0,0,0,0,2,1,1,3,1,0,2,0,4,0,3,4,4,4,5,2,0,2,0,0,1,3), -(0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,1,0,0,1,1,0,0,0,4,2,1,1,0,1,0,3,2,0,0,3,1,1,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,1,0,0,0,2,0,0,0,1,4,0,4,2,1,0,0,0,0,0,1), -(0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,1,0,0,0,0,3,1,0,0,0,2,0,2,1,0,0,1,2,1,0,1,1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,1,3,1,0,0,0,0,0,1,0,0,2,1,0,0,0,0,0,0,0,0,2), -(0,4,0,4,0,4,0,3,0,4,4,3,4,2,4,3,2,0,4,4,4,3,5,3,5,3,3,2,4,2,4,3,4,3,1,4,0,2,3,4,4,4,3,3,3,4,4,4,3,4,1,3,4,3,2,1,2,1,3,3,3,4,4,3,3,5,0,4,0,3,0,4,3,3,3,2,1,0,3,0,0,3,3), -(0,4,0,3,0,3,0,3,0,3,5,5,3,3,3,3,4,3,4,3,3,3,4,4,4,3,3,3,3,4,3,5,3,3,1,3,2,4,5,5,5,5,4,3,4,5,5,3,2,2,3,3,3,3,2,3,3,1,2,3,2,4,3,3,3,4,0,4,0,2,0,4,3,2,2,1,2,0,3,0,0,4,1), +# fmt: off +jp2_char_context = ( + (0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1), + (2, 4, 0, 4, 0, 3, 0, 4, 0, 3, 4, 4, 4, 2, 4, 3, 3, 4, 3, 2, 3, 3, 4, 2, 3, 3, 3, 2, 4, 1, 4, 3, 3, 1, 5, 4, 3, 4, 3, 4, 3, 5, 3, 0, 3, 5, 4, 2, 0, 3, 1, 0, 3, 3, 0, 3, 3, 0, 1, 1, 0, 4, 3, 0, 3, 3, 0, 4, 0, 2, 0, 3, 5, 5, 5, 5, 4, 0, 4, 1, 0, 3, 4), + (0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2), + (0, 4, 0, 5, 0, 5, 0, 4, 0, 4, 5, 4, 4, 3, 5, 3, 5, 1, 5, 3, 4, 3, 4, 4, 3, 4, 3, 3, 4, 3, 5, 4, 4, 3, 5, 5, 3, 5, 5, 5, 3, 5, 5, 3, 4, 5, 5, 3, 1, 3, 2, 0, 3, 4, 0, 4, 2, 0, 4, 2, 1, 5, 3, 2, 3, 5, 0, 4, 0, 2, 0, 5, 4, 4, 5, 4, 5, 0, 4, 0, 0, 4, 4), + (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), + (0, 3, 0, 4, 0, 3, 0, 3, 0, 4, 5, 4, 3, 3, 
3, 3, 4, 3, 5, 4, 4, 3, 5, 4, 4, 3, 4, 3, 4, 4, 4, 4, 5, 3, 4, 4, 3, 4, 5, 5, 4, 5, 5, 1, 4, 5, 4, 3, 0, 3, 3, 1, 3, 3, 0, 4, 4, 0, 3, 3, 1, 5, 3, 3, 3, 5, 0, 4, 0, 3, 0, 4, 4, 3, 4, 3, 3, 0, 4, 1, 1, 3, 4), + (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), + (0, 4, 0, 3, 0, 3, 0, 4, 0, 3, 4, 4, 3, 2, 2, 1, 2, 1, 3, 1, 3, 3, 3, 3, 3, 4, 3, 1, 3, 3, 5, 3, 3, 0, 4, 3, 0, 5, 4, 3, 3, 5, 4, 4, 3, 4, 4, 5, 0, 1, 2, 0, 1, 2, 0, 2, 2, 0, 1, 0, 0, 5, 2, 2, 1, 4, 0, 3, 0, 1, 0, 4, 4, 3, 5, 4, 3, 0, 2, 1, 0, 4, 3), + (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), + (0, 3, 0, 5, 0, 4, 0, 2, 1, 4, 4, 2, 4, 1, 4, 2, 4, 2, 4, 3, 3, 3, 4, 3, 3, 3, 3, 1, 4, 2, 3, 3, 3, 1, 4, 4, 1, 1, 1, 4, 3, 3, 2, 0, 2, 4, 3, 2, 0, 3, 3, 0, 3, 1, 1, 0, 0, 0, 3, 3, 0, 4, 2, 2, 3, 4, 0, 4, 0, 3, 0, 4, 4, 5, 3, 4, 4, 0, 3, 0, 0, 1, 4), + (1, 4, 0, 4, 0, 4, 0, 4, 0, 3, 5, 4, 4, 3, 4, 3, 5, 4, 3, 3, 4, 3, 5, 4, 4, 4, 4, 3, 4, 2, 4, 3, 3, 1, 5, 4, 3, 2, 4, 5, 4, 5, 5, 4, 4, 5, 4, 4, 0, 3, 2, 2, 3, 3, 0, 4, 3, 1, 3, 2, 1, 4, 3, 3, 4, 5, 0, 3, 0, 2, 0, 4, 5, 5, 4, 5, 4, 0, 4, 0, 0, 5, 4), + (0, 5, 0, 5, 0, 4, 0, 3, 0, 4, 4, 3, 4, 3, 3, 3, 4, 0, 4, 4, 4, 3, 4, 3, 4, 3, 3, 1, 4, 2, 4, 3, 4, 0, 5, 4, 1, 4, 5, 4, 4, 5, 3, 2, 4, 3, 4, 3, 2, 4, 1, 3, 3, 3, 2, 3, 2, 0, 4, 3, 3, 4, 3, 3, 3, 4, 0, 4, 0, 3, 0, 4, 5, 4, 4, 4, 3, 0, 4, 1, 0, 1, 3), + (0, 3, 1, 4, 0, 3, 0, 2, 0, 3, 4, 4, 3, 1, 4, 2, 3, 3, 4, 3, 4, 3, 4, 3, 4, 4, 3, 2, 3, 1, 5, 4, 4, 1, 4, 4, 3, 5, 4, 4, 3, 5, 5, 4, 3, 4, 4, 3, 1, 2, 3, 1, 2, 2, 0, 3, 2, 0, 3, 1, 0, 5, 3, 3, 3, 4, 3, 3, 3, 3, 4, 4, 4, 4, 5, 4, 2, 0, 3, 3, 2, 4, 3), + (0, 2, 0, 3, 0, 1, 0, 1, 0, 0, 3, 2, 0, 0, 2, 0, 1, 0, 2, 1, 3, 3, 3, 1, 2, 3, 1, 0, 1, 0, 4, 2, 1, 1, 3, 3, 0, 4, 3, 3, 1, 4, 3, 3, 0, 3, 3, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 4, 1, 0, 2, 3, 2, 2, 2, 1, 3, 3, 3, 4, 4, 3, 2, 0, 3, 1, 0, 3, 3), + (0, 4, 0, 4, 0, 3, 0, 3, 0, 4, 4, 4, 3, 3, 3, 3, 3, 3, 4, 3, 4, 2, 4, 3, 4, 3, 3, 2, 4, 3, 4, 5, 4, 1, 4, 5, 3, 5, 4, 5, 3, 5, 4, 0, 3, 5, 5, 3, 1, 3, 3, 2, 2, 3, 0, 3, 4, 1, 3, 3, 2, 4, 3, 3, 3, 4, 0, 4, 0, 3, 0, 4, 5, 4, 4, 5, 3, 0, 4, 1, 0, 3, 4), + (0, 2, 0, 3, 0, 3, 0, 0, 0, 2, 2, 2, 1, 0, 1, 0, 0, 0, 3, 0, 3, 0, 3, 0, 1, 3, 1, 0, 3, 1, 3, 3, 3, 1, 3, 3, 3, 0, 1, 3, 1, 3, 4, 0, 0, 3, 1, 1, 0, 3, 2, 0, 0, 0, 0, 1, 3, 0, 1, 0, 0, 3, 3, 2, 0, 3, 0, 0, 0, 0, 0, 3, 4, 3, 4, 3, 3, 0, 3, 0, 0, 2, 3), + (2, 3, 0, 3, 0, 2, 0, 1, 0, 3, 3, 4, 3, 1, 3, 1, 1, 1, 3, 1, 4, 3, 4, 3, 3, 3, 0, 0, 3, 1, 5, 4, 3, 1, 4, 3, 2, 5, 5, 4, 4, 4, 4, 3, 3, 4, 4, 4, 0, 2, 1, 1, 3, 2, 0, 1, 2, 0, 0, 1, 0, 4, 1, 3, 3, 3, 0, 3, 0, 1, 0, 4, 4, 4, 5, 5, 3, 0, 2, 0, 0, 4, 4), + (0, 2, 0, 1, 0, 3, 1, 3, 0, 2, 3, 3, 3, 0, 3, 1, 0, 0, 3, 0, 3, 2, 3, 1, 3, 2, 1, 1, 0, 0, 4, 2, 1, 0, 2, 3, 1, 4, 3, 2, 0, 4, 4, 3, 1, 3, 1, 3, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 4, 1, 1, 1, 2, 0, 3, 0, 0, 0, 3, 4, 2, 4, 3, 2, 0, 1, 0, 0, 3, 3), + (0, 1, 0, 4, 0, 5, 0, 4, 0, 2, 4, 4, 2, 3, 3, 2, 3, 3, 5, 3, 3, 3, 4, 3, 4, 2, 3, 0, 4, 3, 3, 3, 4, 1, 4, 3, 2, 1, 5, 5, 3, 4, 5, 1, 3, 5, 4, 2, 0, 3, 3, 0, 1, 3, 0, 4, 2, 0, 1, 3, 1, 4, 3, 3, 3, 3, 0, 3, 0, 1, 0, 3, 4, 4, 4, 5, 5, 0, 3, 0, 1, 4, 5), + (0, 2, 0, 3, 0, 3, 0, 0, 0, 2, 3, 1, 3, 0, 4, 0, 1, 1, 
3, 0, 3, 4, 3, 2, 3, 1, 0, 3, 3, 2, 3, 1, 3, 0, 2, 3, 0, 2, 1, 4, 1, 2, 2, 0, 0, 3, 3, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 0, 2, 2, 0, 3, 2, 1, 3, 3, 0, 2, 0, 2, 0, 0, 3, 3, 1, 2, 4, 0, 3, 0, 2, 2, 3), + (2, 4, 0, 5, 0, 4, 0, 4, 0, 2, 4, 4, 4, 3, 4, 3, 3, 3, 1, 2, 4, 3, 4, 3, 4, 4, 5, 0, 3, 3, 3, 3, 2, 0, 4, 3, 1, 4, 3, 4, 1, 4, 4, 3, 3, 4, 4, 3, 1, 2, 3, 0, 4, 2, 0, 4, 1, 0, 3, 3, 0, 4, 3, 3, 3, 4, 0, 4, 0, 2, 0, 3, 5, 3, 4, 5, 2, 0, 3, 0, 0, 4, 5), + (0, 3, 0, 4, 0, 1, 0, 1, 0, 1, 3, 2, 2, 1, 3, 0, 3, 0, 2, 0, 2, 0, 3, 0, 2, 0, 0, 0, 1, 0, 1, 1, 0, 0, 3, 1, 0, 0, 0, 4, 0, 3, 1, 0, 2, 1, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 2, 2, 3, 1, 0, 3, 0, 0, 0, 1, 4, 4, 4, 3, 0, 0, 4, 0, 0, 1, 4), + (1, 4, 1, 5, 0, 3, 0, 3, 0, 4, 5, 4, 4, 3, 5, 3, 3, 4, 4, 3, 4, 1, 3, 3, 3, 3, 2, 1, 4, 1, 5, 4, 3, 1, 4, 4, 3, 5, 4, 4, 3, 5, 4, 3, 3, 4, 4, 4, 0, 3, 3, 1, 2, 3, 0, 3, 1, 0, 3, 3, 0, 5, 4, 4, 4, 4, 4, 4, 3, 3, 5, 4, 4, 3, 3, 5, 4, 0, 3, 2, 0, 4, 4), + (0, 2, 0, 3, 0, 1, 0, 0, 0, 1, 3, 3, 3, 2, 4, 1, 3, 0, 3, 1, 3, 0, 2, 2, 1, 1, 0, 0, 2, 0, 4, 3, 1, 0, 4, 3, 0, 4, 4, 4, 1, 4, 3, 1, 1, 3, 3, 1, 0, 2, 0, 0, 1, 3, 0, 0, 0, 0, 2, 0, 0, 4, 3, 2, 4, 3, 5, 4, 3, 3, 3, 4, 3, 3, 4, 3, 3, 0, 2, 1, 0, 3, 3), + (0, 2, 0, 4, 0, 3, 0, 2, 0, 2, 5, 5, 3, 4, 4, 4, 4, 1, 4, 3, 3, 0, 4, 3, 4, 3, 1, 3, 3, 2, 4, 3, 0, 3, 4, 3, 0, 3, 4, 4, 2, 4, 4, 0, 4, 5, 3, 3, 2, 2, 1, 1, 1, 2, 0, 1, 5, 0, 3, 3, 2, 4, 3, 3, 3, 4, 0, 3, 0, 2, 0, 4, 4, 3, 5, 5, 0, 0, 3, 0, 2, 3, 3), + (0, 3, 0, 4, 0, 3, 0, 1, 0, 3, 4, 3, 3, 1, 3, 3, 3, 0, 3, 1, 3, 0, 4, 3, 3, 1, 1, 0, 3, 0, 3, 3, 0, 0, 4, 4, 0, 1, 5, 4, 3, 3, 5, 0, 3, 3, 4, 3, 0, 2, 0, 1, 1, 1, 0, 1, 3, 0, 1, 2, 1, 3, 3, 2, 3, 3, 0, 3, 0, 1, 0, 1, 3, 3, 4, 4, 1, 0, 1, 2, 2, 1, 3), + (0, 1, 0, 4, 0, 4, 0, 3, 0, 1, 3, 3, 3, 2, 3, 1, 1, 0, 3, 0, 3, 3, 4, 3, 2, 4, 2, 0, 1, 0, 4, 3, 2, 0, 4, 3, 0, 5, 3, 3, 2, 4, 4, 4, 3, 3, 3, 4, 0, 1, 3, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 4, 2, 3, 3, 3, 0, 3, 0, 0, 0, 4, 4, 4, 5, 3, 2, 0, 3, 3, 0, 3, 5), + (0, 2, 0, 3, 0, 0, 0, 3, 0, 1, 3, 0, 2, 0, 0, 0, 1, 0, 3, 1, 1, 3, 3, 0, 0, 3, 0, 0, 3, 0, 2, 3, 1, 0, 3, 1, 0, 3, 3, 2, 0, 4, 2, 2, 0, 2, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 2, 0, 1, 0, 1, 0, 0, 0, 1, 3, 1, 2, 0, 0, 0, 1, 0, 0, 1, 4), + (0, 3, 0, 3, 0, 5, 0, 1, 0, 2, 4, 3, 1, 3, 3, 2, 1, 1, 5, 2, 1, 0, 5, 1, 2, 0, 0, 0, 3, 3, 2, 2, 3, 2, 4, 3, 0, 0, 3, 3, 1, 3, 3, 0, 2, 5, 3, 4, 0, 3, 3, 0, 1, 2, 0, 2, 2, 0, 3, 2, 0, 2, 2, 3, 3, 3, 0, 2, 0, 1, 0, 3, 4, 4, 2, 5, 4, 0, 3, 0, 0, 3, 5), + (0, 3, 0, 3, 0, 3, 0, 1, 0, 3, 3, 3, 3, 0, 3, 0, 2, 0, 2, 1, 1, 0, 2, 0, 1, 0, 0, 0, 2, 1, 0, 0, 1, 0, 3, 2, 0, 0, 3, 3, 1, 2, 3, 1, 0, 3, 3, 0, 0, 1, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 2, 3, 1, 2, 3, 0, 3, 0, 1, 0, 3, 2, 1, 0, 4, 3, 0, 1, 1, 0, 3, 3), + (0, 4, 0, 5, 0, 3, 0, 3, 0, 4, 5, 5, 4, 3, 5, 3, 4, 3, 5, 3, 3, 2, 5, 3, 4, 4, 4, 3, 4, 3, 4, 5, 5, 3, 4, 4, 3, 4, 4, 5, 4, 4, 4, 3, 4, 5, 5, 4, 2, 3, 4, 2, 3, 4, 0, 3, 3, 1, 4, 3, 2, 4, 3, 3, 5, 5, 0, 3, 0, 3, 0, 5, 5, 5, 5, 4, 4, 0, 4, 0, 1, 4, 4), + (0, 4, 0, 4, 0, 3, 0, 3, 0, 3, 5, 4, 4, 2, 3, 2, 5, 1, 3, 2, 5, 1, 4, 2, 3, 2, 3, 3, 4, 3, 3, 3, 3, 2, 5, 4, 1, 3, 3, 5, 3, 4, 4, 0, 4, 4, 3, 1, 1, 3, 1, 0, 2, 3, 0, 2, 3, 0, 3, 0, 0, 4, 3, 1, 3, 4, 0, 3, 0, 2, 0, 4, 4, 4, 3, 4, 5, 0, 4, 0, 0, 3, 4), + (0, 3, 0, 3, 0, 3, 1, 2, 0, 3, 4, 4, 3, 3, 3, 0, 2, 2, 4, 3, 3, 1, 3, 3, 3, 1, 1, 0, 3, 1, 4, 3, 2, 3, 4, 4, 2, 4, 4, 4, 3, 4, 4, 3, 2, 4, 4, 3, 1, 3, 3, 1, 3, 3, 0, 4, 1, 0, 2, 2, 1, 4, 3, 2, 3, 3, 5, 4, 3, 3, 5, 4, 4, 3, 3, 0, 4, 0, 3, 2, 2, 4, 4), + (0, 2, 0, 1, 0, 0, 0, 0, 0, 1, 2, 1, 3, 0, 0, 0, 0, 0, 2, 0, 1, 2, 
1, 0, 0, 1, 0, 0, 0, 0, 3, 0, 0, 1, 0, 1, 1, 3, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 0, 3, 4, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1), + (0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 4, 0, 4, 1, 4, 0, 3, 0, 4, 0, 3, 0, 4, 0, 3, 0, 3, 0, 4, 1, 5, 1, 4, 0, 0, 3, 0, 5, 0, 5, 2, 0, 1, 0, 0, 0, 2, 1, 4, 0, 1, 3, 0, 0, 3, 0, 0, 3, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0), + (1, 4, 0, 5, 0, 3, 0, 2, 0, 3, 5, 4, 4, 3, 4, 3, 5, 3, 4, 3, 3, 0, 4, 3, 3, 3, 3, 3, 3, 2, 4, 4, 3, 1, 3, 4, 4, 5, 4, 4, 3, 4, 4, 1, 3, 5, 4, 3, 3, 3, 1, 2, 2, 3, 3, 1, 3, 1, 3, 3, 3, 5, 3, 3, 4, 5, 0, 3, 0, 3, 0, 3, 4, 3, 4, 4, 3, 0, 3, 0, 2, 4, 3), + (0, 1, 0, 4, 0, 0, 0, 0, 0, 1, 4, 0, 4, 1, 4, 2, 4, 0, 3, 0, 1, 0, 1, 0, 0, 0, 0, 0, 2, 0, 3, 1, 1, 1, 0, 3, 0, 0, 0, 1, 2, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 3, 0, 0, 0, 0, 3, 2, 0, 2, 2, 0, 1, 0, 0, 0, 2, 3, 2, 3, 3, 0, 0, 0, 0, 2, 1, 0), + (0, 5, 1, 5, 0, 3, 0, 3, 0, 5, 4, 4, 5, 1, 5, 3, 3, 0, 4, 3, 4, 3, 5, 3, 4, 3, 3, 2, 4, 3, 4, 3, 3, 0, 3, 3, 1, 4, 4, 3, 4, 4, 4, 3, 4, 5, 5, 3, 2, 3, 1, 1, 3, 3, 1, 3, 1, 1, 3, 3, 2, 4, 5, 3, 3, 5, 0, 4, 0, 3, 0, 4, 4, 3, 5, 3, 3, 0, 3, 4, 0, 4, 3), + (0, 5, 0, 5, 0, 3, 0, 2, 0, 4, 4, 3, 5, 2, 4, 3, 3, 3, 4, 4, 4, 3, 5, 3, 5, 3, 3, 1, 4, 0, 4, 3, 3, 0, 3, 3, 0, 4, 4, 4, 4, 5, 4, 3, 3, 5, 5, 3, 2, 3, 1, 2, 3, 2, 0, 1, 0, 0, 3, 2, 2, 4, 4, 3, 1, 5, 0, 4, 0, 3, 0, 4, 3, 1, 3, 2, 1, 0, 3, 3, 0, 3, 3), + (0, 4, 0, 5, 0, 5, 0, 4, 0, 4, 5, 5, 5, 3, 4, 3, 3, 2, 5, 4, 4, 3, 5, 3, 5, 3, 4, 0, 4, 3, 4, 4, 3, 2, 4, 4, 3, 4, 5, 4, 4, 5, 5, 0, 3, 5, 5, 4, 1, 3, 3, 2, 3, 3, 1, 3, 1, 0, 4, 3, 1, 4, 4, 3, 4, 5, 0, 4, 0, 2, 0, 4, 3, 4, 4, 3, 3, 0, 4, 0, 0, 5, 5), + (0, 4, 0, 4, 0, 5, 0, 1, 1, 3, 3, 4, 4, 3, 4, 1, 3, 0, 5, 1, 3, 0, 3, 1, 3, 1, 1, 0, 3, 0, 3, 3, 4, 0, 4, 3, 0, 4, 4, 4, 3, 4, 4, 0, 3, 5, 4, 1, 0, 3, 0, 0, 2, 3, 0, 3, 1, 0, 3, 1, 0, 3, 2, 1, 3, 5, 0, 3, 0, 1, 0, 3, 2, 3, 3, 4, 4, 0, 2, 2, 0, 4, 4), + (2, 4, 0, 5, 0, 4, 0, 3, 0, 4, 5, 5, 4, 3, 5, 3, 5, 3, 5, 3, 5, 2, 5, 3, 4, 3, 3, 4, 3, 4, 5, 3, 2, 1, 5, 4, 3, 2, 3, 4, 5, 3, 4, 1, 2, 5, 4, 3, 0, 3, 3, 0, 3, 2, 0, 2, 3, 0, 4, 1, 0, 3, 4, 3, 3, 5, 0, 3, 0, 1, 0, 4, 5, 5, 5, 4, 3, 0, 4, 2, 0, 3, 5), + (0, 5, 0, 4, 0, 4, 0, 2, 0, 5, 4, 3, 4, 3, 4, 3, 3, 3, 4, 3, 4, 2, 5, 3, 5, 3, 4, 1, 4, 3, 4, 4, 4, 0, 3, 5, 0, 4, 4, 4, 4, 5, 3, 1, 3, 4, 5, 3, 3, 3, 3, 3, 3, 3, 0, 2, 2, 0, 3, 3, 2, 4, 3, 3, 3, 5, 3, 4, 1, 3, 3, 5, 3, 2, 0, 0, 0, 0, 4, 3, 1, 3, 3), + (0, 1, 0, 3, 0, 3, 0, 1, 0, 1, 3, 3, 3, 2, 3, 3, 3, 0, 3, 0, 0, 0, 3, 1, 3, 0, 0, 0, 2, 2, 2, 3, 0, 0, 3, 2, 0, 1, 2, 4, 1, 3, 3, 0, 0, 3, 3, 3, 0, 1, 0, 0, 2, 1, 0, 0, 3, 0, 3, 1, 0, 3, 0, 0, 1, 3, 0, 2, 0, 1, 0, 3, 3, 1, 3, 3, 0, 0, 1, 1, 0, 3, 3), + (0, 2, 0, 3, 0, 2, 1, 4, 0, 2, 2, 3, 1, 1, 3, 1, 1, 0, 2, 0, 3, 1, 2, 3, 1, 3, 0, 0, 1, 0, 4, 3, 2, 3, 3, 3, 1, 4, 2, 3, 3, 3, 3, 1, 0, 3, 1, 4, 0, 1, 1, 0, 1, 2, 0, 1, 1, 0, 1, 1, 0, 3, 1, 3, 2, 2, 0, 1, 0, 0, 0, 2, 3, 3, 3, 1, 0, 0, 0, 0, 0, 2, 3), + (0, 5, 0, 4, 0, 5, 0, 2, 0, 4, 5, 5, 3, 3, 4, 3, 3, 1, 5, 4, 4, 2, 4, 4, 4, 3, 4, 2, 4, 3, 5, 5, 4, 3, 3, 4, 3, 3, 5, 5, 4, 5, 5, 1, 3, 4, 5, 3, 1, 4, 3, 1, 3, 3, 0, 3, 3, 1, 4, 3, 1, 4, 5, 3, 3, 5, 0, 4, 0, 3, 0, 5, 3, 3, 1, 4, 3, 0, 4, 0, 1, 5, 3), + (0, 5, 0, 5, 0, 4, 0, 2, 0, 4, 4, 3, 4, 3, 3, 3, 3, 3, 5, 4, 4, 4, 4, 4, 4, 5, 3, 3, 5, 2, 4, 4, 4, 3, 4, 4, 3, 3, 4, 4, 5, 5, 3, 3, 4, 3, 4, 3, 3, 4, 3, 3, 3, 3, 1, 2, 2, 1, 4, 3, 3, 5, 4, 4, 3, 4, 0, 4, 0, 3, 0, 4, 4, 4, 4, 4, 1, 0, 4, 2, 0, 2, 4), + (0, 4, 0, 4, 0, 3, 0, 1, 0, 3, 5, 2, 3, 0, 3, 0, 2, 1, 4, 2, 3, 3, 4, 1, 4, 3, 
3, 2, 4, 1, 3, 3, 3, 0, 3, 3, 0, 0, 3, 3, 3, 5, 3, 3, 3, 3, 3, 2, 0, 2, 0, 0, 2, 0, 0, 2, 0, 0, 1, 0, 0, 3, 1, 2, 2, 3, 0, 3, 0, 2, 0, 4, 4, 3, 3, 4, 1, 0, 3, 0, 0, 2, 4), + (0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 2, 0, 0, 0, 0, 0, 1, 0, 2, 0, 1, 0, 0, 0, 0, 0, 3, 1, 3, 0, 3, 2, 0, 0, 0, 1, 0, 3, 2, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 2, 0, 0, 0, 0, 0, 0, 2), + (0, 2, 1, 3, 0, 2, 0, 2, 0, 3, 3, 3, 3, 1, 3, 1, 3, 3, 3, 3, 3, 3, 4, 2, 2, 1, 2, 1, 4, 0, 4, 3, 1, 3, 3, 3, 2, 4, 3, 5, 4, 3, 3, 3, 3, 3, 3, 3, 0, 1, 3, 0, 2, 0, 0, 1, 0, 0, 1, 0, 0, 4, 2, 0, 2, 3, 0, 3, 3, 0, 3, 3, 4, 2, 3, 1, 4, 0, 1, 2, 0, 2, 3), + (0, 3, 0, 3, 0, 1, 0, 3, 0, 2, 3, 3, 3, 0, 3, 1, 2, 0, 3, 3, 2, 3, 3, 2, 3, 2, 3, 1, 3, 0, 4, 3, 2, 0, 3, 3, 1, 4, 3, 3, 2, 3, 4, 3, 1, 3, 3, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 4, 1, 1, 0, 3, 0, 3, 1, 0, 2, 3, 3, 3, 3, 3, 1, 0, 0, 2, 0, 3, 3), + (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 2, 0, 3, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 3, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 2, 0, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3), + (0, 2, 0, 3, 1, 3, 0, 3, 0, 2, 3, 3, 3, 1, 3, 1, 3, 1, 3, 1, 3, 3, 3, 1, 3, 0, 2, 3, 1, 1, 4, 3, 3, 2, 3, 3, 1, 2, 2, 4, 1, 3, 3, 0, 1, 4, 2, 3, 0, 1, 3, 0, 3, 0, 0, 1, 3, 0, 2, 0, 0, 3, 3, 2, 1, 3, 0, 3, 0, 2, 0, 3, 4, 4, 4, 3, 1, 0, 3, 0, 0, 3, 3), + (0, 2, 0, 1, 0, 2, 0, 0, 0, 1, 3, 2, 2, 1, 3, 0, 1, 1, 3, 0, 3, 2, 3, 1, 2, 0, 2, 0, 1, 1, 3, 3, 3, 0, 3, 3, 1, 1, 2, 3, 2, 3, 3, 1, 2, 3, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 1, 0, 0, 2, 1, 2, 1, 3, 0, 3, 0, 0, 0, 3, 4, 4, 4, 3, 2, 0, 2, 0, 0, 2, 4), + (0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 3, 1, 0, 0, 0, 0, 0, 0, 0, 3), + (0, 3, 0, 3, 0, 2, 0, 3, 0, 3, 3, 3, 2, 3, 2, 2, 2, 0, 3, 1, 3, 3, 3, 2, 3, 3, 0, 0, 3, 0, 3, 2, 2, 0, 2, 3, 1, 4, 3, 4, 3, 3, 2, 3, 1, 5, 4, 4, 0, 3, 1, 2, 1, 3, 0, 3, 1, 1, 2, 0, 2, 3, 1, 3, 1, 3, 0, 3, 0, 1, 0, 3, 3, 4, 4, 2, 1, 0, 2, 1, 0, 2, 4), + (0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 4, 2, 5, 1, 4, 0, 2, 0, 2, 1, 3, 1, 4, 0, 2, 1, 0, 0, 2, 1, 4, 1, 1, 0, 3, 3, 0, 5, 1, 3, 2, 3, 3, 1, 0, 3, 2, 3, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 4, 0, 1, 0, 3, 0, 2, 0, 1, 0, 3, 3, 3, 4, 3, 3, 0, 0, 0, 0, 2, 3), + (0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 2, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 1, 0, 0, 0, 0, 0, 3), + (0, 1, 0, 3, 0, 4, 0, 3, 0, 2, 4, 3, 1, 0, 3, 2, 2, 1, 3, 1, 2, 2, 3, 1, 1, 1, 2, 1, 3, 0, 1, 2, 0, 1, 3, 2, 1, 3, 0, 5, 5, 1, 0, 0, 1, 3, 2, 1, 0, 3, 0, 0, 1, 0, 0, 0, 0, 0, 3, 4, 0, 1, 1, 1, 3, 2, 0, 2, 0, 1, 0, 2, 3, 3, 1, 2, 3, 0, 1, 0, 1, 0, 4), + (0, 0, 0, 1, 0, 3, 0, 3, 0, 2, 2, 1, 0, 0, 4, 0, 3, 0, 3, 1, 3, 0, 3, 0, 3, 0, 1, 0, 3, 0, 3, 1, 3, 0, 3, 3, 0, 0, 1, 2, 1, 1, 1, 0, 1, 2, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 1, 2, 0, 0, 2, 0, 0, 0, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, 1, 4), + (0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 3, 1, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 3, 0, 2, 0, 2, 3, 0, 0, 2, 2, 3, 1, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 2, 0, 0, 0, 0, 2, 3), + (2, 4, 0, 5, 0, 5, 0, 4, 0, 3, 4, 3, 3, 3, 4, 3, 3, 3, 4, 3, 4, 4, 5, 4, 5, 5, 5, 2, 3, 0, 
5, 5, 4, 1, 5, 4, 3, 1, 5, 4, 3, 4, 4, 3, 3, 4, 3, 3, 0, 3, 2, 0, 2, 3, 0, 3, 0, 0, 3, 3, 0, 5, 3, 2, 3, 3, 0, 3, 0, 3, 0, 3, 4, 5, 4, 5, 3, 0, 4, 3, 0, 3, 4), + (0, 3, 0, 3, 0, 3, 0, 3, 0, 3, 3, 4, 3, 2, 3, 2, 3, 0, 4, 3, 3, 3, 3, 3, 3, 3, 3, 0, 3, 2, 4, 3, 3, 1, 3, 4, 3, 4, 4, 4, 3, 4, 4, 3, 2, 4, 4, 1, 0, 2, 0, 0, 1, 1, 0, 2, 0, 0, 3, 1, 0, 5, 3, 2, 1, 3, 0, 3, 0, 1, 2, 4, 3, 2, 4, 3, 3, 0, 3, 2, 0, 4, 4), + (0, 3, 0, 3, 0, 1, 0, 0, 0, 1, 4, 3, 3, 2, 3, 1, 3, 1, 4, 2, 3, 2, 4, 2, 3, 4, 3, 0, 2, 2, 3, 3, 3, 0, 3, 3, 3, 0, 3, 4, 1, 3, 3, 0, 3, 4, 3, 3, 0, 1, 1, 0, 1, 0, 0, 0, 4, 0, 3, 0, 0, 3, 1, 2, 1, 3, 0, 4, 0, 1, 0, 4, 3, 3, 4, 3, 3, 0, 2, 0, 0, 3, 3), + (0, 3, 0, 4, 0, 1, 0, 3, 0, 3, 4, 3, 3, 0, 3, 3, 3, 1, 3, 1, 3, 3, 4, 3, 3, 3, 0, 0, 3, 1, 5, 3, 3, 1, 3, 3, 2, 5, 4, 3, 3, 4, 5, 3, 2, 5, 3, 4, 0, 1, 0, 0, 0, 0, 0, 2, 0, 0, 1, 1, 0, 4, 2, 2, 1, 3, 0, 3, 0, 2, 0, 4, 4, 3, 5, 3, 2, 0, 1, 1, 0, 3, 4), + (0, 5, 0, 4, 0, 5, 0, 2, 0, 4, 4, 3, 3, 2, 3, 3, 3, 1, 4, 3, 4, 1, 5, 3, 4, 3, 4, 0, 4, 2, 4, 3, 4, 1, 5, 4, 0, 4, 4, 4, 4, 5, 4, 1, 3, 5, 4, 2, 1, 4, 1, 1, 3, 2, 0, 3, 1, 0, 3, 2, 1, 4, 3, 3, 3, 4, 0, 4, 0, 3, 0, 4, 4, 4, 3, 3, 3, 0, 4, 2, 0, 3, 4), + (1, 4, 0, 4, 0, 3, 0, 1, 0, 3, 3, 3, 1, 1, 3, 3, 2, 2, 3, 3, 1, 0, 3, 2, 2, 1, 2, 0, 3, 1, 2, 1, 2, 0, 3, 2, 0, 2, 2, 3, 3, 4, 3, 0, 3, 3, 1, 2, 0, 1, 1, 3, 1, 2, 0, 0, 3, 0, 1, 1, 0, 3, 2, 2, 3, 3, 0, 3, 0, 0, 0, 2, 3, 3, 4, 3, 3, 0, 1, 0, 0, 1, 4), + (0, 4, 0, 4, 0, 4, 0, 0, 0, 3, 4, 4, 3, 1, 4, 2, 3, 2, 3, 3, 3, 1, 4, 3, 4, 0, 3, 0, 4, 2, 3, 3, 2, 2, 5, 4, 2, 1, 3, 4, 3, 4, 3, 1, 3, 3, 4, 2, 0, 2, 1, 0, 3, 3, 0, 0, 2, 0, 3, 1, 0, 4, 4, 3, 4, 3, 0, 4, 0, 1, 0, 2, 4, 4, 4, 4, 4, 0, 3, 2, 0, 3, 3), + (0, 0, 0, 1, 0, 4, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 3, 2, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 2), + (0, 2, 0, 3, 0, 4, 0, 4, 0, 1, 3, 3, 3, 0, 4, 0, 2, 1, 2, 1, 1, 1, 2, 0, 3, 1, 1, 0, 1, 0, 3, 1, 0, 0, 3, 3, 2, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 2, 0, 2, 2, 0, 3, 1, 0, 0, 1, 0, 1, 1, 0, 1, 2, 0, 3, 0, 0, 0, 0, 1, 0, 0, 3, 3, 4, 3, 1, 0, 1, 0, 3, 0, 2), + (0, 0, 0, 3, 0, 5, 0, 0, 0, 0, 1, 0, 2, 0, 3, 1, 0, 1, 3, 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 4, 0, 0, 0, 2, 3, 0, 1, 4, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 3, 0, 0, 0, 0, 0, 3), + (0, 2, 0, 5, 0, 5, 0, 1, 0, 2, 4, 3, 3, 2, 5, 1, 3, 2, 3, 3, 3, 0, 4, 1, 2, 0, 3, 0, 4, 0, 2, 2, 1, 1, 5, 3, 0, 0, 1, 4, 2, 3, 2, 0, 3, 3, 3, 2, 0, 2, 4, 1, 1, 2, 0, 1, 1, 0, 3, 1, 0, 1, 3, 1, 2, 3, 0, 2, 0, 0, 0, 1, 3, 5, 4, 4, 4, 0, 3, 0, 0, 1, 3), + (0, 4, 0, 5, 0, 4, 0, 4, 0, 4, 5, 4, 3, 3, 4, 3, 3, 3, 4, 3, 4, 4, 5, 3, 4, 5, 4, 2, 4, 2, 3, 4, 3, 1, 4, 4, 1, 3, 5, 4, 4, 5, 5, 4, 4, 5, 5, 5, 2, 3, 3, 1, 4, 3, 1, 3, 3, 0, 3, 3, 1, 4, 3, 4, 4, 4, 0, 3, 0, 4, 0, 3, 3, 4, 4, 5, 0, 0, 4, 3, 0, 4, 5), + (0, 4, 0, 4, 0, 3, 0, 3, 0, 3, 4, 4, 4, 3, 3, 2, 4, 3, 4, 3, 4, 3, 5, 3, 4, 3, 2, 1, 4, 2, 4, 4, 3, 1, 3, 4, 2, 4, 5, 5, 3, 4, 5, 4, 1, 5, 4, 3, 0, 3, 2, 2, 3, 2, 1, 3, 1, 0, 3, 3, 3, 5, 3, 3, 3, 5, 4, 4, 2, 3, 3, 4, 3, 3, 3, 2, 1, 0, 3, 2, 1, 4, 3), + (0, 4, 0, 5, 0, 4, 0, 3, 0, 3, 5, 5, 3, 2, 4, 3, 4, 0, 5, 4, 4, 1, 4, 4, 4, 3, 3, 3, 4, 3, 5, 5, 2, 3, 3, 4, 1, 2, 5, 5, 3, 5, 5, 2, 3, 5, 5, 4, 0, 3, 2, 0, 3, 3, 1, 1, 5, 1, 4, 1, 0, 4, 3, 2, 3, 5, 0, 4, 0, 3, 0, 5, 4, 3, 4, 3, 0, 0, 4, 1, 0, 4, 4), + (1, 3, 0, 4, 0, 2, 0, 2, 0, 2, 5, 5, 3, 3, 3, 3, 3, 0, 4, 2, 3, 4, 4, 4, 3, 4, 0, 0, 3, 4, 5, 4, 3, 3, 
3, 3, 2, 5, 5, 4, 5, 5, 5, 4, 3, 5, 5, 5, 1, 3, 1, 0, 1, 0, 0, 3, 2, 0, 4, 2, 0, 5, 2, 3, 2, 4, 1, 3, 0, 3, 0, 4, 5, 4, 5, 4, 3, 0, 4, 2, 0, 5, 4), + (0, 3, 0, 4, 0, 5, 0, 3, 0, 3, 4, 4, 3, 2, 3, 2, 3, 3, 3, 3, 3, 2, 4, 3, 3, 2, 2, 0, 3, 3, 3, 3, 3, 1, 3, 3, 3, 0, 4, 4, 3, 4, 4, 1, 1, 4, 4, 2, 0, 3, 1, 0, 1, 1, 0, 4, 1, 0, 2, 3, 1, 3, 3, 1, 3, 4, 0, 3, 0, 1, 0, 3, 1, 3, 0, 0, 1, 0, 2, 0, 0, 4, 4), + (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), + (0, 3, 0, 3, 0, 2, 0, 3, 0, 1, 5, 4, 3, 3, 3, 1, 4, 2, 1, 2, 3, 4, 4, 2, 4, 4, 5, 0, 3, 1, 4, 3, 4, 0, 4, 3, 3, 3, 2, 3, 2, 5, 3, 4, 3, 2, 2, 3, 0, 0, 3, 0, 2, 1, 0, 1, 2, 0, 0, 0, 0, 2, 1, 1, 3, 1, 0, 2, 0, 4, 0, 3, 4, 4, 4, 5, 2, 0, 2, 0, 0, 1, 3), + (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 4, 2, 1, 1, 0, 1, 0, 3, 2, 0, 0, 3, 1, 1, 1, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 1, 0, 0, 0, 2, 0, 0, 0, 1, 4, 0, 4, 2, 1, 0, 0, 0, 0, 0, 1), + (0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 2, 0, 2, 1, 0, 0, 1, 2, 1, 0, 1, 1, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 3, 1, 0, 0, 0, 0, 0, 1, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 2), + (0, 4, 0, 4, 0, 4, 0, 3, 0, 4, 4, 3, 4, 2, 4, 3, 2, 0, 4, 4, 4, 3, 5, 3, 5, 3, 3, 2, 4, 2, 4, 3, 4, 3, 1, 4, 0, 2, 3, 4, 4, 4, 3, 3, 3, 4, 4, 4, 3, 4, 1, 3, 4, 3, 2, 1, 2, 1, 3, 3, 3, 4, 4, 3, 3, 5, 0, 4, 0, 3, 0, 4, 3, 3, 3, 2, 1, 0, 3, 0, 0, 3, 3), + (0, 4, 0, 3, 0, 3, 0, 3, 0, 3, 5, 5, 3, 3, 3, 3, 4, 3, 4, 3, 3, 3, 4, 4, 4, 3, 3, 3, 3, 4, 3, 5, 3, 3, 1, 3, 2, 4, 5, 5, 5, 5, 4, 3, 4, 5, 5, 3, 2, 2, 3, 3, 3, 3, 2, 3, 3, 1, 2, 3, 2, 4, 3, 3, 3, 4, 0, 4, 0, 2, 0, 4, 3, 2, 2, 1, 2, 0, 3, 0, 0, 4, 1), ) +# fmt: on -class JapaneseContextAnalysis(object): + +class JapaneseContextAnalysis: NUM_OF_CATEGORY = 6 DONT_KNOW = -1 ENOUGH_REL_THRESHOLD = 100 MAX_REL_THRESHOLD = 1000 MINIMUM_DATA_THRESHOLD = 4 - def __init__(self): - self._total_rel = None - self._rel_sample = None - self._need_to_skip_char_num = None - self._last_char_order = None - self._done = None + def __init__(self) -> None: + self._total_rel = 0 + self._rel_sample: List[int] = [] + self._need_to_skip_char_num = 0 + self._last_char_order = -1 + self._done = False self.reset() - def reset(self): + def reset(self) -> None: self._total_rel = 0 # total sequence received # category counters, each integer counts sequence in its category self._rel_sample = [0] * self.NUM_OF_CATEGORY @@ -140,7 +144,7 @@ class JapaneseContextAnalysis(object): # been made self._done = False - def feed(self, byte_str, num_bytes): + def feed(self, byte_str: Union[bytes, bytearray], num_bytes: int) -> None: if self._done: return @@ -153,7 +157,7 @@ class JapaneseContextAnalysis(object): # this character will simply our logic and improve performance. 
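# For illustration only -- a standalone sketch of the sampling loop this
# hunk reworks (sketch_feed and its parameters are hypothetical, not
# chardet API; it leans on the jp2_char_context table above): classify
# each character, then tally how common each hiragana pair is.

from typing import Callable, Tuple

def sketch_feed(
    byte_str: bytes, get_order: Callable[[bytes], Tuple[int, int]]
) -> float:
    total_rel = 0
    rel_sample = [0] * 6        # one counter per NUM_OF_CATEGORY bucket
    last_order = -1             # -1 means "previous char was not hiragana"
    i = 0
    while i < len(byte_str):
        order, char_len = get_order(byte_str[i : i + 2])
        i += char_len
        if order != -1 and last_order != -1:
            total_rel += 1
            rel_sample[jp2_char_context[last_order][order]] += 1
        last_order = order
    # Confidence: share of observed pairs outside category 0 ("don't know").
    return (total_rel - rel_sample[0]) / total_rel if total_rel else -1.0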
i = self._need_to_skip_char_num while i < num_bytes: - order, char_len = self.get_order(byte_str[i:i + 2]) + order, char_len = self.get_order(byte_str[i : i + 2]) i += char_len if i > num_bytes: self._need_to_skip_char_num = i - num_bytes @@ -164,32 +168,34 @@ class JapaneseContextAnalysis(object): if self._total_rel > self.MAX_REL_THRESHOLD: self._done = True break - self._rel_sample[jp2CharContext[self._last_char_order][order]] += 1 + self._rel_sample[ + jp2_char_context[self._last_char_order][order] + ] += 1 self._last_char_order = order - def got_enough_data(self): + def got_enough_data(self) -> bool: return self._total_rel > self.ENOUGH_REL_THRESHOLD - def get_confidence(self): + def get_confidence(self) -> float: # This is just one way to calculate confidence. It works well for me. if self._total_rel > self.MINIMUM_DATA_THRESHOLD: return (self._total_rel - self._rel_sample[0]) / self._total_rel - else: - return self.DONT_KNOW + return self.DONT_KNOW - def get_order(self, byte_str): + def get_order(self, _: Union[bytes, bytearray]) -> Tuple[int, int]: return -1, 1 + class SJISContextAnalysis(JapaneseContextAnalysis): - def __init__(self): - super(SJISContextAnalysis, self).__init__() + def __init__(self) -> None: + super().__init__() self._charset_name = "SHIFT_JIS" @property - def charset_name(self): + def charset_name(self) -> str: return self._charset_name - def get_order(self, byte_str): + def get_order(self, byte_str: Union[bytes, bytearray]) -> Tuple[int, int]: if not byte_str: return -1, 1 # find out current char's byte length @@ -209,8 +215,9 @@ class SJISContextAnalysis(JapaneseContextAnalysis): return -1, char_len + class EUCJPContextAnalysis(JapaneseContextAnalysis): - def get_order(self, byte_str): + def get_order(self, byte_str: Union[bytes, bytearray]) -> Tuple[int, int]: if not byte_str: return -1, 1 # find out current char's byte length @@ -229,5 +236,3 @@ class EUCJPContextAnalysis(JapaneseContextAnalysis): return second_char - 0xA1, char_len return -1, char_len - - diff --git a/env/Lib/site-packages/pip/_vendor/chardet/langbulgarianmodel.py b/env/Lib/site-packages/pip/_vendor/chardet/langbulgarianmodel.py index e963a50979a0b3dd56558240e075ca0f889479df..994668219dd4def6404e0afd3f538b29a0e50f8b 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/langbulgarianmodel.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/langbulgarianmodel.py @@ -1,9 +1,5 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - from pip._vendor.chardet.sbcharsetprober import SingleByteCharSetModel - # 3: Positive # 2: Likely # 1: Unlikely @@ -4115,536 +4111,539 @@ BULGARIAN_LANG_MODEL = { # Character Mapping Table(s): ISO_8859_5_BULGARIAN_CHAR_TO_ORDER = { - 0: 255, # '\x00' - 1: 255, # '\x01' - 2: 255, # '\x02' - 3: 255, # '\x03' - 4: 255, # '\x04' - 5: 255, # '\x05' - 6: 255, # '\x06' - 7: 255, # '\x07' - 8: 255, # '\x08' - 9: 255, # '\t' - 10: 254, # '\n' - 11: 255, # '\x0b' - 12: 255, # '\x0c' - 13: 254, # '\r' - 14: 255, # '\x0e' - 15: 255, # '\x0f' - 16: 255, # '\x10' - 17: 255, # '\x11' - 18: 255, # '\x12' - 19: 255, # '\x13' - 20: 255, # '\x14' - 21: 255, # '\x15' - 22: 255, # '\x16' - 23: 255, # '\x17' - 24: 255, # '\x18' - 25: 255, # '\x19' - 26: 255, # '\x1a' - 27: 255, # '\x1b' - 28: 255, # '\x1c' - 29: 255, # '\x1d' - 30: 255, # '\x1e' - 31: 255, # '\x1f' - 32: 253, # ' ' - 33: 253, # '!' 
- 34: 253, # '"' - 35: 253, # '#' - 36: 253, # '$' - 37: 253, # '%' - 38: 253, # '&' - 39: 253, # "'" - 40: 253, # '(' - 41: 253, # ')' - 42: 253, # '*' - 43: 253, # '+' - 44: 253, # ',' - 45: 253, # '-' - 46: 253, # '.' - 47: 253, # '/' - 48: 252, # '0' - 49: 252, # '1' - 50: 252, # '2' - 51: 252, # '3' - 52: 252, # '4' - 53: 252, # '5' - 54: 252, # '6' - 55: 252, # '7' - 56: 252, # '8' - 57: 252, # '9' - 58: 253, # ':' - 59: 253, # ';' - 60: 253, # '<' - 61: 253, # '=' - 62: 253, # '>' - 63: 253, # '?' - 64: 253, # '@' - 65: 77, # 'A' - 66: 90, # 'B' - 67: 99, # 'C' - 68: 100, # 'D' - 69: 72, # 'E' - 70: 109, # 'F' - 71: 107, # 'G' - 72: 101, # 'H' - 73: 79, # 'I' - 74: 185, # 'J' - 75: 81, # 'K' - 76: 102, # 'L' - 77: 76, # 'M' - 78: 94, # 'N' - 79: 82, # 'O' - 80: 110, # 'P' - 81: 186, # 'Q' - 82: 108, # 'R' - 83: 91, # 'S' - 84: 74, # 'T' - 85: 119, # 'U' - 86: 84, # 'V' - 87: 96, # 'W' - 88: 111, # 'X' - 89: 187, # 'Y' - 90: 115, # 'Z' - 91: 253, # '[' - 92: 253, # '\\' - 93: 253, # ']' - 94: 253, # '^' - 95: 253, # '_' - 96: 253, # '`' - 97: 65, # 'a' - 98: 69, # 'b' - 99: 70, # 'c' - 100: 66, # 'd' - 101: 63, # 'e' - 102: 68, # 'f' - 103: 112, # 'g' - 104: 103, # 'h' - 105: 92, # 'i' - 106: 194, # 'j' - 107: 104, # 'k' - 108: 95, # 'l' - 109: 86, # 'm' - 110: 87, # 'n' - 111: 71, # 'o' - 112: 116, # 'p' - 113: 195, # 'q' - 114: 85, # 'r' - 115: 93, # 's' - 116: 97, # 't' - 117: 113, # 'u' - 118: 196, # 'v' - 119: 197, # 'w' - 120: 198, # 'x' - 121: 199, # 'y' - 122: 200, # 'z' - 123: 253, # '{' - 124: 253, # '|' - 125: 253, # '}' - 126: 253, # '~' - 127: 253, # '\x7f' - 128: 194, # '\x80' - 129: 195, # '\x81' - 130: 196, # '\x82' - 131: 197, # '\x83' - 132: 198, # '\x84' - 133: 199, # '\x85' - 134: 200, # '\x86' - 135: 201, # '\x87' - 136: 202, # '\x88' - 137: 203, # '\x89' - 138: 204, # '\x8a' - 139: 205, # '\x8b' - 140: 206, # '\x8c' - 141: 207, # '\x8d' - 142: 208, # '\x8e' - 143: 209, # '\x8f' - 144: 210, # '\x90' - 145: 211, # '\x91' - 146: 212, # '\x92' - 147: 213, # '\x93' - 148: 214, # '\x94' - 149: 215, # '\x95' - 150: 216, # '\x96' - 151: 217, # '\x97' - 152: 218, # '\x98' - 153: 219, # '\x99' - 154: 220, # '\x9a' - 155: 221, # '\x9b' - 156: 222, # '\x9c' - 157: 223, # '\x9d' - 158: 224, # '\x9e' - 159: 225, # '\x9f' - 160: 81, # '\xa0' - 161: 226, # 'Ё' - 162: 227, # 'Ђ' - 163: 228, # 'Ѓ' - 164: 229, # 'Є' - 165: 230, # 'Ѕ' - 166: 105, # 'І' - 167: 231, # 'Ї' - 168: 232, # 'Ј' - 169: 233, # 'Љ' - 170: 234, # 'Њ' - 171: 235, # 'Ћ' - 172: 236, # 'Ќ' - 173: 45, # '\xad' - 174: 237, # 'Ў' - 175: 238, # 'Џ' - 176: 31, # 'А' - 177: 32, # 'Б' - 178: 35, # 'В' - 179: 43, # 'Г' - 180: 37, # 'Д' - 181: 44, # 'Е' - 182: 55, # 'Ж' - 183: 47, # 'З' - 184: 40, # 'И' - 185: 59, # 'Й' - 186: 33, # 'К' - 187: 46, # 'Л' - 188: 38, # 'М' - 189: 36, # 'Н' - 190: 41, # 'О' - 191: 30, # 'П' - 192: 39, # 'Р' - 193: 28, # 'С' - 194: 34, # 'Т' - 195: 51, # 'У' - 196: 48, # 'Ф' - 197: 49, # 'Х' - 198: 53, # 'Ц' - 199: 50, # 'Ч' - 200: 54, # 'Ш' - 201: 57, # 'Щ' - 202: 61, # 'Ъ' - 203: 239, # 'Ы' - 204: 67, # 'Ь' - 205: 240, # 'Э' - 206: 60, # 'Ю' - 207: 56, # 'Я' - 208: 1, # 'а' - 209: 18, # 'б' - 210: 9, # 'в' - 211: 20, # 'г' - 212: 11, # 'д' - 213: 3, # 'е' - 214: 23, # 'ж' - 215: 15, # 'з' - 216: 2, # 'и' - 217: 26, # 'й' - 218: 12, # 'к' - 219: 10, # 'л' - 220: 14, # 'м' - 221: 6, # 'н' - 222: 4, # 'о' - 223: 13, # 'п' - 224: 7, # 'р' - 225: 8, # 'с' - 226: 5, # 'т' - 227: 19, # 'у' - 228: 29, # 'ф' - 229: 25, # 'х' - 230: 22, # 'ц' - 231: 21, # 'ч' - 232: 27, # 'ш' - 233: 24, # 'щ' - 234: 
17, # 'ъ' - 235: 75, # 'ы' - 236: 52, # 'ь' - 237: 241, # 'э' - 238: 42, # 'ю' - 239: 16, # 'я' - 240: 62, # '№' - 241: 242, # 'ё' - 242: 243, # 'ђ' - 243: 244, # 'ѓ' - 244: 58, # 'є' - 245: 245, # 'ѕ' - 246: 98, # 'і' - 247: 246, # 'ї' - 248: 247, # 'ј' - 249: 248, # 'љ' - 250: 249, # 'њ' - 251: 250, # 'ћ' - 252: 251, # 'ќ' - 253: 91, # '§' - 254: 252, # 'ў' - 255: 253, # 'џ' + 0: 255, # '\x00' + 1: 255, # '\x01' + 2: 255, # '\x02' + 3: 255, # '\x03' + 4: 255, # '\x04' + 5: 255, # '\x05' + 6: 255, # '\x06' + 7: 255, # '\x07' + 8: 255, # '\x08' + 9: 255, # '\t' + 10: 254, # '\n' + 11: 255, # '\x0b' + 12: 255, # '\x0c' + 13: 254, # '\r' + 14: 255, # '\x0e' + 15: 255, # '\x0f' + 16: 255, # '\x10' + 17: 255, # '\x11' + 18: 255, # '\x12' + 19: 255, # '\x13' + 20: 255, # '\x14' + 21: 255, # '\x15' + 22: 255, # '\x16' + 23: 255, # '\x17' + 24: 255, # '\x18' + 25: 255, # '\x19' + 26: 255, # '\x1a' + 27: 255, # '\x1b' + 28: 255, # '\x1c' + 29: 255, # '\x1d' + 30: 255, # '\x1e' + 31: 255, # '\x1f' + 32: 253, # ' ' + 33: 253, # '!' + 34: 253, # '"' + 35: 253, # '#' + 36: 253, # '$' + 37: 253, # '%' + 38: 253, # '&' + 39: 253, # "'" + 40: 253, # '(' + 41: 253, # ')' + 42: 253, # '*' + 43: 253, # '+' + 44: 253, # ',' + 45: 253, # '-' + 46: 253, # '.' + 47: 253, # '/' + 48: 252, # '0' + 49: 252, # '1' + 50: 252, # '2' + 51: 252, # '3' + 52: 252, # '4' + 53: 252, # '5' + 54: 252, # '6' + 55: 252, # '7' + 56: 252, # '8' + 57: 252, # '9' + 58: 253, # ':' + 59: 253, # ';' + 60: 253, # '<' + 61: 253, # '=' + 62: 253, # '>' + 63: 253, # '?' + 64: 253, # '@' + 65: 77, # 'A' + 66: 90, # 'B' + 67: 99, # 'C' + 68: 100, # 'D' + 69: 72, # 'E' + 70: 109, # 'F' + 71: 107, # 'G' + 72: 101, # 'H' + 73: 79, # 'I' + 74: 185, # 'J' + 75: 81, # 'K' + 76: 102, # 'L' + 77: 76, # 'M' + 78: 94, # 'N' + 79: 82, # 'O' + 80: 110, # 'P' + 81: 186, # 'Q' + 82: 108, # 'R' + 83: 91, # 'S' + 84: 74, # 'T' + 85: 119, # 'U' + 86: 84, # 'V' + 87: 96, # 'W' + 88: 111, # 'X' + 89: 187, # 'Y' + 90: 115, # 'Z' + 91: 253, # '[' + 92: 253, # '\\' + 93: 253, # ']' + 94: 253, # '^' + 95: 253, # '_' + 96: 253, # '`' + 97: 65, # 'a' + 98: 69, # 'b' + 99: 70, # 'c' + 100: 66, # 'd' + 101: 63, # 'e' + 102: 68, # 'f' + 103: 112, # 'g' + 104: 103, # 'h' + 105: 92, # 'i' + 106: 194, # 'j' + 107: 104, # 'k' + 108: 95, # 'l' + 109: 86, # 'm' + 110: 87, # 'n' + 111: 71, # 'o' + 112: 116, # 'p' + 113: 195, # 'q' + 114: 85, # 'r' + 115: 93, # 's' + 116: 97, # 't' + 117: 113, # 'u' + 118: 196, # 'v' + 119: 197, # 'w' + 120: 198, # 'x' + 121: 199, # 'y' + 122: 200, # 'z' + 123: 253, # '{' + 124: 253, # '|' + 125: 253, # '}' + 126: 253, # '~' + 127: 253, # '\x7f' + 128: 194, # '\x80' + 129: 195, # '\x81' + 130: 196, # '\x82' + 131: 197, # '\x83' + 132: 198, # '\x84' + 133: 199, # '\x85' + 134: 200, # '\x86' + 135: 201, # '\x87' + 136: 202, # '\x88' + 137: 203, # '\x89' + 138: 204, # '\x8a' + 139: 205, # '\x8b' + 140: 206, # '\x8c' + 141: 207, # '\x8d' + 142: 208, # '\x8e' + 143: 209, # '\x8f' + 144: 210, # '\x90' + 145: 211, # '\x91' + 146: 212, # '\x92' + 147: 213, # '\x93' + 148: 214, # '\x94' + 149: 215, # '\x95' + 150: 216, # '\x96' + 151: 217, # '\x97' + 152: 218, # '\x98' + 153: 219, # '\x99' + 154: 220, # '\x9a' + 155: 221, # '\x9b' + 156: 222, # '\x9c' + 157: 223, # '\x9d' + 158: 224, # '\x9e' + 159: 225, # '\x9f' + 160: 81, # '\xa0' + 161: 226, # 'Ё' + 162: 227, # 'Ђ' + 163: 228, # 'Ѓ' + 164: 229, # 'Є' + 165: 230, # 'Ѕ' + 166: 105, # 'І' + 167: 231, # 'Ї' + 168: 232, # 'Ј' + 169: 233, # 'Љ' + 170: 234, # 'Њ' + 171: 235, # 'Ћ' + 172: 236, # 'Ќ' + 
173: 45, # '\xad' + 174: 237, # 'Ў' + 175: 238, # 'Џ' + 176: 31, # 'А' + 177: 32, # 'Б' + 178: 35, # 'В' + 179: 43, # 'Г' + 180: 37, # 'Д' + 181: 44, # 'Е' + 182: 55, # 'Ж' + 183: 47, # 'З' + 184: 40, # 'И' + 185: 59, # 'Й' + 186: 33, # 'К' + 187: 46, # 'Л' + 188: 38, # 'М' + 189: 36, # 'Н' + 190: 41, # 'О' + 191: 30, # 'П' + 192: 39, # 'Р' + 193: 28, # 'С' + 194: 34, # 'Т' + 195: 51, # 'У' + 196: 48, # 'Ф' + 197: 49, # 'Х' + 198: 53, # 'Ц' + 199: 50, # 'Ч' + 200: 54, # 'Ш' + 201: 57, # 'Щ' + 202: 61, # 'Ъ' + 203: 239, # 'Ы' + 204: 67, # 'Ь' + 205: 240, # 'Э' + 206: 60, # 'Ю' + 207: 56, # 'Я' + 208: 1, # 'а' + 209: 18, # 'б' + 210: 9, # 'в' + 211: 20, # 'г' + 212: 11, # 'д' + 213: 3, # 'е' + 214: 23, # 'ж' + 215: 15, # 'з' + 216: 2, # 'и' + 217: 26, # 'й' + 218: 12, # 'к' + 219: 10, # 'л' + 220: 14, # 'м' + 221: 6, # 'н' + 222: 4, # 'о' + 223: 13, # 'п' + 224: 7, # 'р' + 225: 8, # 'с' + 226: 5, # 'т' + 227: 19, # 'у' + 228: 29, # 'ф' + 229: 25, # 'х' + 230: 22, # 'ц' + 231: 21, # 'ч' + 232: 27, # 'ш' + 233: 24, # 'щ' + 234: 17, # 'ъ' + 235: 75, # 'ы' + 236: 52, # 'ь' + 237: 241, # 'э' + 238: 42, # 'ю' + 239: 16, # 'я' + 240: 62, # '№' + 241: 242, # 'ё' + 242: 243, # 'ђ' + 243: 244, # 'ѓ' + 244: 58, # 'є' + 245: 245, # 'ѕ' + 246: 98, # 'і' + 247: 246, # 'ї' + 248: 247, # 'ј' + 249: 248, # 'љ' + 250: 249, # 'њ' + 251: 250, # 'ћ' + 252: 251, # 'ќ' + 253: 91, # '§' + 254: 252, # 'ў' + 255: 253, # 'џ' } -ISO_8859_5_BULGARIAN_MODEL = SingleByteCharSetModel(charset_name='ISO-8859-5', - language='Bulgarian', - char_to_order_map=ISO_8859_5_BULGARIAN_CHAR_TO_ORDER, - language_model=BULGARIAN_LANG_MODEL, - typical_positive_ratio=0.969392, - keep_ascii_letters=False, - alphabet='АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЬЮЯабвгдежзийклмнопрстуфхцчшщъьюя') +ISO_8859_5_BULGARIAN_MODEL = SingleByteCharSetModel( + charset_name="ISO-8859-5", + language="Bulgarian", + char_to_order_map=ISO_8859_5_BULGARIAN_CHAR_TO_ORDER, + language_model=BULGARIAN_LANG_MODEL, + typical_positive_ratio=0.969392, + keep_ascii_letters=False, + alphabet="АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЬЮЯабвгдежзийклмнопрстуфхцчшщъьюя", +) WINDOWS_1251_BULGARIAN_CHAR_TO_ORDER = { - 0: 255, # '\x00' - 1: 255, # '\x01' - 2: 255, # '\x02' - 3: 255, # '\x03' - 4: 255, # '\x04' - 5: 255, # '\x05' - 6: 255, # '\x06' - 7: 255, # '\x07' - 8: 255, # '\x08' - 9: 255, # '\t' - 10: 254, # '\n' - 11: 255, # '\x0b' - 12: 255, # '\x0c' - 13: 254, # '\r' - 14: 255, # '\x0e' - 15: 255, # '\x0f' - 16: 255, # '\x10' - 17: 255, # '\x11' - 18: 255, # '\x12' - 19: 255, # '\x13' - 20: 255, # '\x14' - 21: 255, # '\x15' - 22: 255, # '\x16' - 23: 255, # '\x17' - 24: 255, # '\x18' - 25: 255, # '\x19' - 26: 255, # '\x1a' - 27: 255, # '\x1b' - 28: 255, # '\x1c' - 29: 255, # '\x1d' - 30: 255, # '\x1e' - 31: 255, # '\x1f' - 32: 253, # ' ' - 33: 253, # '!' - 34: 253, # '"' - 35: 253, # '#' - 36: 253, # '$' - 37: 253, # '%' - 38: 253, # '&' - 39: 253, # "'" - 40: 253, # '(' - 41: 253, # ')' - 42: 253, # '*' - 43: 253, # '+' - 44: 253, # ',' - 45: 253, # '-' - 46: 253, # '.' - 47: 253, # '/' - 48: 252, # '0' - 49: 252, # '1' - 50: 252, # '2' - 51: 252, # '3' - 52: 252, # '4' - 53: 252, # '5' - 54: 252, # '6' - 55: 252, # '7' - 56: 252, # '8' - 57: 252, # '9' - 58: 253, # ':' - 59: 253, # ';' - 60: 253, # '<' - 61: 253, # '=' - 62: 253, # '>' - 63: 253, # '?' 
- 64: 253, # '@' - 65: 77, # 'A' - 66: 90, # 'B' - 67: 99, # 'C' - 68: 100, # 'D' - 69: 72, # 'E' - 70: 109, # 'F' - 71: 107, # 'G' - 72: 101, # 'H' - 73: 79, # 'I' - 74: 185, # 'J' - 75: 81, # 'K' - 76: 102, # 'L' - 77: 76, # 'M' - 78: 94, # 'N' - 79: 82, # 'O' - 80: 110, # 'P' - 81: 186, # 'Q' - 82: 108, # 'R' - 83: 91, # 'S' - 84: 74, # 'T' - 85: 119, # 'U' - 86: 84, # 'V' - 87: 96, # 'W' - 88: 111, # 'X' - 89: 187, # 'Y' - 90: 115, # 'Z' - 91: 253, # '[' - 92: 253, # '\\' - 93: 253, # ']' - 94: 253, # '^' - 95: 253, # '_' - 96: 253, # '`' - 97: 65, # 'a' - 98: 69, # 'b' - 99: 70, # 'c' - 100: 66, # 'd' - 101: 63, # 'e' - 102: 68, # 'f' - 103: 112, # 'g' - 104: 103, # 'h' - 105: 92, # 'i' - 106: 194, # 'j' - 107: 104, # 'k' - 108: 95, # 'l' - 109: 86, # 'm' - 110: 87, # 'n' - 111: 71, # 'o' - 112: 116, # 'p' - 113: 195, # 'q' - 114: 85, # 'r' - 115: 93, # 's' - 116: 97, # 't' - 117: 113, # 'u' - 118: 196, # 'v' - 119: 197, # 'w' - 120: 198, # 'x' - 121: 199, # 'y' - 122: 200, # 'z' - 123: 253, # '{' - 124: 253, # '|' - 125: 253, # '}' - 126: 253, # '~' - 127: 253, # '\x7f' - 128: 206, # 'Ђ' - 129: 207, # 'Ѓ' - 130: 208, # '‚' - 131: 209, # 'ѓ' - 132: 210, # '„' - 133: 211, # '…' - 134: 212, # '†' - 135: 213, # '‡' - 136: 120, # '€' - 137: 214, # '‰' - 138: 215, # 'Љ' - 139: 216, # '‹' - 140: 217, # 'Њ' - 141: 218, # 'Ќ' - 142: 219, # 'Ћ' - 143: 220, # 'Џ' - 144: 221, # 'ђ' - 145: 78, # '‘' - 146: 64, # '’' - 147: 83, # '“' - 148: 121, # '”' - 149: 98, # '•' - 150: 117, # '–' - 151: 105, # '—' - 152: 222, # None - 153: 223, # '™' - 154: 224, # 'љ' - 155: 225, # '›' - 156: 226, # 'њ' - 157: 227, # 'ќ' - 158: 228, # 'ћ' - 159: 229, # 'џ' - 160: 88, # '\xa0' - 161: 230, # 'Ў' - 162: 231, # 'ў' - 163: 232, # 'Ј' - 164: 233, # '¤' - 165: 122, # 'Ґ' - 166: 89, # '¦' - 167: 106, # '§' - 168: 234, # 'Ё' - 169: 235, # '©' - 170: 236, # 'Є' - 171: 237, # '«' - 172: 238, # '¬' - 173: 45, # '\xad' - 174: 239, # '®' - 175: 240, # 'Ї' - 176: 73, # '°' - 177: 80, # '±' - 178: 118, # 'І' - 179: 114, # 'і' - 180: 241, # 'ґ' - 181: 242, # 'µ' - 182: 243, # '¶' - 183: 244, # '·' - 184: 245, # 'ё' - 185: 62, # '№' - 186: 58, # 'є' - 187: 246, # '»' - 188: 247, # 'ј' - 189: 248, # 'Ѕ' - 190: 249, # 'ѕ' - 191: 250, # 'ї' - 192: 31, # 'А' - 193: 32, # 'Б' - 194: 35, # 'В' - 195: 43, # 'Г' - 196: 37, # 'Д' - 197: 44, # 'Е' - 198: 55, # 'Ж' - 199: 47, # 'З' - 200: 40, # 'И' - 201: 59, # 'Й' - 202: 33, # 'К' - 203: 46, # 'Л' - 204: 38, # 'М' - 205: 36, # 'Н' - 206: 41, # 'О' - 207: 30, # 'П' - 208: 39, # 'Р' - 209: 28, # 'С' - 210: 34, # 'Т' - 211: 51, # 'У' - 212: 48, # 'Ф' - 213: 49, # 'Х' - 214: 53, # 'Ц' - 215: 50, # 'Ч' - 216: 54, # 'Ш' - 217: 57, # 'Щ' - 218: 61, # 'Ъ' - 219: 251, # 'Ы' - 220: 67, # 'Ь' - 221: 252, # 'Э' - 222: 60, # 'Ю' - 223: 56, # 'Я' - 224: 1, # 'а' - 225: 18, # 'б' - 226: 9, # 'в' - 227: 20, # 'г' - 228: 11, # 'д' - 229: 3, # 'е' - 230: 23, # 'ж' - 231: 15, # 'з' - 232: 2, # 'и' - 233: 26, # 'й' - 234: 12, # 'к' - 235: 10, # 'л' - 236: 14, # 'м' - 237: 6, # 'н' - 238: 4, # 'о' - 239: 13, # 'п' - 240: 7, # 'р' - 241: 8, # 'с' - 242: 5, # 'т' - 243: 19, # 'у' - 244: 29, # 'ф' - 245: 25, # 'х' - 246: 22, # 'ц' - 247: 21, # 'ч' - 248: 27, # 'ш' - 249: 24, # 'щ' - 250: 17, # 'ъ' - 251: 75, # 'ы' - 252: 52, # 'ь' - 253: 253, # 'э' - 254: 42, # 'ю' - 255: 16, # 'я' + 0: 255, # '\x00' + 1: 255, # '\x01' + 2: 255, # '\x02' + 3: 255, # '\x03' + 4: 255, # '\x04' + 5: 255, # '\x05' + 6: 255, # '\x06' + 7: 255, # '\x07' + 8: 255, # '\x08' + 9: 255, # '\t' + 10: 254, # '\n' + 11: 255, # '\x0b' + 12: 
255, # '\x0c' + 13: 254, # '\r' + 14: 255, # '\x0e' + 15: 255, # '\x0f' + 16: 255, # '\x10' + 17: 255, # '\x11' + 18: 255, # '\x12' + 19: 255, # '\x13' + 20: 255, # '\x14' + 21: 255, # '\x15' + 22: 255, # '\x16' + 23: 255, # '\x17' + 24: 255, # '\x18' + 25: 255, # '\x19' + 26: 255, # '\x1a' + 27: 255, # '\x1b' + 28: 255, # '\x1c' + 29: 255, # '\x1d' + 30: 255, # '\x1e' + 31: 255, # '\x1f' + 32: 253, # ' ' + 33: 253, # '!' + 34: 253, # '"' + 35: 253, # '#' + 36: 253, # '$' + 37: 253, # '%' + 38: 253, # '&' + 39: 253, # "'" + 40: 253, # '(' + 41: 253, # ')' + 42: 253, # '*' + 43: 253, # '+' + 44: 253, # ',' + 45: 253, # '-' + 46: 253, # '.' + 47: 253, # '/' + 48: 252, # '0' + 49: 252, # '1' + 50: 252, # '2' + 51: 252, # '3' + 52: 252, # '4' + 53: 252, # '5' + 54: 252, # '6' + 55: 252, # '7' + 56: 252, # '8' + 57: 252, # '9' + 58: 253, # ':' + 59: 253, # ';' + 60: 253, # '<' + 61: 253, # '=' + 62: 253, # '>' + 63: 253, # '?' + 64: 253, # '@' + 65: 77, # 'A' + 66: 90, # 'B' + 67: 99, # 'C' + 68: 100, # 'D' + 69: 72, # 'E' + 70: 109, # 'F' + 71: 107, # 'G' + 72: 101, # 'H' + 73: 79, # 'I' + 74: 185, # 'J' + 75: 81, # 'K' + 76: 102, # 'L' + 77: 76, # 'M' + 78: 94, # 'N' + 79: 82, # 'O' + 80: 110, # 'P' + 81: 186, # 'Q' + 82: 108, # 'R' + 83: 91, # 'S' + 84: 74, # 'T' + 85: 119, # 'U' + 86: 84, # 'V' + 87: 96, # 'W' + 88: 111, # 'X' + 89: 187, # 'Y' + 90: 115, # 'Z' + 91: 253, # '[' + 92: 253, # '\\' + 93: 253, # ']' + 94: 253, # '^' + 95: 253, # '_' + 96: 253, # '`' + 97: 65, # 'a' + 98: 69, # 'b' + 99: 70, # 'c' + 100: 66, # 'd' + 101: 63, # 'e' + 102: 68, # 'f' + 103: 112, # 'g' + 104: 103, # 'h' + 105: 92, # 'i' + 106: 194, # 'j' + 107: 104, # 'k' + 108: 95, # 'l' + 109: 86, # 'm' + 110: 87, # 'n' + 111: 71, # 'o' + 112: 116, # 'p' + 113: 195, # 'q' + 114: 85, # 'r' + 115: 93, # 's' + 116: 97, # 't' + 117: 113, # 'u' + 118: 196, # 'v' + 119: 197, # 'w' + 120: 198, # 'x' + 121: 199, # 'y' + 122: 200, # 'z' + 123: 253, # '{' + 124: 253, # '|' + 125: 253, # '}' + 126: 253, # '~' + 127: 253, # '\x7f' + 128: 206, # 'Ђ' + 129: 207, # 'Ѓ' + 130: 208, # '‚' + 131: 209, # 'ѓ' + 132: 210, # '„' + 133: 211, # '…' + 134: 212, # '†' + 135: 213, # '‡' + 136: 120, # '€' + 137: 214, # '‰' + 138: 215, # 'Љ' + 139: 216, # '‹' + 140: 217, # 'Њ' + 141: 218, # 'Ќ' + 142: 219, # 'Ћ' + 143: 220, # 'Џ' + 144: 221, # 'ђ' + 145: 78, # '‘' + 146: 64, # '’' + 147: 83, # '“' + 148: 121, # '”' + 149: 98, # '•' + 150: 117, # '–' + 151: 105, # '—' + 152: 222, # None + 153: 223, # '™' + 154: 224, # 'љ' + 155: 225, # '›' + 156: 226, # 'њ' + 157: 227, # 'ќ' + 158: 228, # 'ћ' + 159: 229, # 'џ' + 160: 88, # '\xa0' + 161: 230, # 'Ў' + 162: 231, # 'ў' + 163: 232, # 'Ј' + 164: 233, # '¤' + 165: 122, # 'Ґ' + 166: 89, # '¦' + 167: 106, # '§' + 168: 234, # 'Ё' + 169: 235, # '©' + 170: 236, # 'Є' + 171: 237, # '«' + 172: 238, # '¬' + 173: 45, # '\xad' + 174: 239, # '®' + 175: 240, # 'Ї' + 176: 73, # '°' + 177: 80, # '±' + 178: 118, # 'І' + 179: 114, # 'і' + 180: 241, # 'ґ' + 181: 242, # 'µ' + 182: 243, # '¶' + 183: 244, # '·' + 184: 245, # 'ё' + 185: 62, # '№' + 186: 58, # 'є' + 187: 246, # '»' + 188: 247, # 'ј' + 189: 248, # 'Ѕ' + 190: 249, # 'ѕ' + 191: 250, # 'ї' + 192: 31, # 'А' + 193: 32, # 'Б' + 194: 35, # 'В' + 195: 43, # 'Г' + 196: 37, # 'Д' + 197: 44, # 'Е' + 198: 55, # 'Ж' + 199: 47, # 'З' + 200: 40, # 'И' + 201: 59, # 'Й' + 202: 33, # 'К' + 203: 46, # 'Л' + 204: 38, # 'М' + 205: 36, # 'Н' + 206: 41, # 'О' + 207: 30, # 'П' + 208: 39, # 'Р' + 209: 28, # 'С' + 210: 34, # 'Т' + 211: 51, # 'У' + 212: 48, # 'Ф' + 213: 49, # 'Х' + 
214: 53, # 'Ц' + 215: 50, # 'Ч' + 216: 54, # 'Ш' + 217: 57, # 'Щ' + 218: 61, # 'Ъ' + 219: 251, # 'Ы' + 220: 67, # 'Ь' + 221: 252, # 'Э' + 222: 60, # 'Ю' + 223: 56, # 'Я' + 224: 1, # 'а' + 225: 18, # 'б' + 226: 9, # 'в' + 227: 20, # 'г' + 228: 11, # 'д' + 229: 3, # 'е' + 230: 23, # 'ж' + 231: 15, # 'з' + 232: 2, # 'и' + 233: 26, # 'й' + 234: 12, # 'к' + 235: 10, # 'л' + 236: 14, # 'м' + 237: 6, # 'н' + 238: 4, # 'о' + 239: 13, # 'п' + 240: 7, # 'р' + 241: 8, # 'с' + 242: 5, # 'т' + 243: 19, # 'у' + 244: 29, # 'ф' + 245: 25, # 'х' + 246: 22, # 'ц' + 247: 21, # 'ч' + 248: 27, # 'ш' + 249: 24, # 'щ' + 250: 17, # 'ъ' + 251: 75, # 'ы' + 252: 52, # 'ь' + 253: 253, # 'э' + 254: 42, # 'ю' + 255: 16, # 'я' } -WINDOWS_1251_BULGARIAN_MODEL = SingleByteCharSetModel(charset_name='windows-1251', - language='Bulgarian', - char_to_order_map=WINDOWS_1251_BULGARIAN_CHAR_TO_ORDER, - language_model=BULGARIAN_LANG_MODEL, - typical_positive_ratio=0.969392, - keep_ascii_letters=False, - alphabet='АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЬЮЯабвгдежзийклмнопрстуфхцчшщъьюя') - +WINDOWS_1251_BULGARIAN_MODEL = SingleByteCharSetModel( + charset_name="windows-1251", + language="Bulgarian", + char_to_order_map=WINDOWS_1251_BULGARIAN_CHAR_TO_ORDER, + language_model=BULGARIAN_LANG_MODEL, + typical_positive_ratio=0.969392, + keep_ascii_letters=False, + alphabet="АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЬЮЯабвгдежзийклмнопрстуфхцчшщъьюя", +) diff --git a/env/Lib/site-packages/pip/_vendor/chardet/langgreekmodel.py b/env/Lib/site-packages/pip/_vendor/chardet/langgreekmodel.py index d99528ede7357a287df5f03236f9f7e391f6f35a..cfb8639e5602578cb562ee7197d207dbb539cb74 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/langgreekmodel.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/langgreekmodel.py @@ -1,9 +1,5 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - from pip._vendor.chardet.sbcharsetprober import SingleByteCharSetModel - # 3: Positive # 2: Likely # 1: Unlikely @@ -3863,536 +3859,539 @@ GREEK_LANG_MODEL = { # Character Mapping Table(s): WINDOWS_1253_GREEK_CHAR_TO_ORDER = { - 0: 255, # '\x00' - 1: 255, # '\x01' - 2: 255, # '\x02' - 3: 255, # '\x03' - 4: 255, # '\x04' - 5: 255, # '\x05' - 6: 255, # '\x06' - 7: 255, # '\x07' - 8: 255, # '\x08' - 9: 255, # '\t' - 10: 254, # '\n' - 11: 255, # '\x0b' - 12: 255, # '\x0c' - 13: 254, # '\r' - 14: 255, # '\x0e' - 15: 255, # '\x0f' - 16: 255, # '\x10' - 17: 255, # '\x11' - 18: 255, # '\x12' - 19: 255, # '\x13' - 20: 255, # '\x14' - 21: 255, # '\x15' - 22: 255, # '\x16' - 23: 255, # '\x17' - 24: 255, # '\x18' - 25: 255, # '\x19' - 26: 255, # '\x1a' - 27: 255, # '\x1b' - 28: 255, # '\x1c' - 29: 255, # '\x1d' - 30: 255, # '\x1e' - 31: 255, # '\x1f' - 32: 253, # ' ' - 33: 253, # '!' - 34: 253, # '"' - 35: 253, # '#' - 36: 253, # '$' - 37: 253, # '%' - 38: 253, # '&' - 39: 253, # "'" - 40: 253, # '(' - 41: 253, # ')' - 42: 253, # '*' - 43: 253, # '+' - 44: 253, # ',' - 45: 253, # '-' - 46: 253, # '.' - 47: 253, # '/' - 48: 252, # '0' - 49: 252, # '1' - 50: 252, # '2' - 51: 252, # '3' - 52: 252, # '4' - 53: 252, # '5' - 54: 252, # '6' - 55: 252, # '7' - 56: 252, # '8' - 57: 252, # '9' - 58: 253, # ':' - 59: 253, # ';' - 60: 253, # '<' - 61: 253, # '=' - 62: 253, # '>' - 63: 253, # '?' 
- 64: 253, # '@' - 65: 82, # 'A' - 66: 100, # 'B' - 67: 104, # 'C' - 68: 94, # 'D' - 69: 98, # 'E' - 70: 101, # 'F' - 71: 116, # 'G' - 72: 102, # 'H' - 73: 111, # 'I' - 74: 187, # 'J' - 75: 117, # 'K' - 76: 92, # 'L' - 77: 88, # 'M' - 78: 113, # 'N' - 79: 85, # 'O' - 80: 79, # 'P' - 81: 118, # 'Q' - 82: 105, # 'R' - 83: 83, # 'S' - 84: 67, # 'T' - 85: 114, # 'U' - 86: 119, # 'V' - 87: 95, # 'W' - 88: 99, # 'X' - 89: 109, # 'Y' - 90: 188, # 'Z' - 91: 253, # '[' - 92: 253, # '\\' - 93: 253, # ']' - 94: 253, # '^' - 95: 253, # '_' - 96: 253, # '`' - 97: 72, # 'a' - 98: 70, # 'b' - 99: 80, # 'c' - 100: 81, # 'd' - 101: 60, # 'e' - 102: 96, # 'f' - 103: 93, # 'g' - 104: 89, # 'h' - 105: 68, # 'i' - 106: 120, # 'j' - 107: 97, # 'k' - 108: 77, # 'l' - 109: 86, # 'm' - 110: 69, # 'n' - 111: 55, # 'o' - 112: 78, # 'p' - 113: 115, # 'q' - 114: 65, # 'r' - 115: 66, # 's' - 116: 58, # 't' - 117: 76, # 'u' - 118: 106, # 'v' - 119: 103, # 'w' - 120: 87, # 'x' - 121: 107, # 'y' - 122: 112, # 'z' - 123: 253, # '{' - 124: 253, # '|' - 125: 253, # '}' - 126: 253, # '~' - 127: 253, # '\x7f' - 128: 255, # '€' - 129: 255, # None - 130: 255, # '‚' - 131: 255, # 'ƒ' - 132: 255, # '„' - 133: 255, # '…' - 134: 255, # '†' - 135: 255, # '‡' - 136: 255, # None - 137: 255, # '‰' - 138: 255, # None - 139: 255, # '‹' - 140: 255, # None - 141: 255, # None - 142: 255, # None - 143: 255, # None - 144: 255, # None - 145: 255, # '‘' - 146: 255, # '’' - 147: 255, # '“' - 148: 255, # '”' - 149: 255, # '•' - 150: 255, # '–' - 151: 255, # '—' - 152: 255, # None - 153: 255, # '™' - 154: 255, # None - 155: 255, # '›' - 156: 255, # None - 157: 255, # None - 158: 255, # None - 159: 255, # None - 160: 253, # '\xa0' - 161: 233, # '΅' - 162: 61, # 'Ά' - 163: 253, # '£' - 164: 253, # '¤' - 165: 253, # '¥' - 166: 253, # '¦' - 167: 253, # '§' - 168: 253, # '¨' - 169: 253, # '©' - 170: 253, # None - 171: 253, # '«' - 172: 253, # '¬' - 173: 74, # '\xad' - 174: 253, # '®' - 175: 253, # '―' - 176: 253, # '°' - 177: 253, # '±' - 178: 253, # '²' - 179: 253, # '³' - 180: 247, # '΄' - 181: 253, # 'µ' - 182: 253, # '¶' - 183: 36, # '·' - 184: 46, # 'Έ' - 185: 71, # 'Ή' - 186: 73, # 'Ί' - 187: 253, # '»' - 188: 54, # 'Ό' - 189: 253, # '½' - 190: 108, # 'Ύ' - 191: 123, # 'Ώ' - 192: 110, # 'ΐ' - 193: 31, # 'Α' - 194: 51, # 'Β' - 195: 43, # 'Γ' - 196: 41, # 'Δ' - 197: 34, # 'Ε' - 198: 91, # 'Ζ' - 199: 40, # 'Η' - 200: 52, # 'Θ' - 201: 47, # 'Ι' - 202: 44, # 'Κ' - 203: 53, # 'Λ' - 204: 38, # 'Μ' - 205: 49, # 'Ν' - 206: 59, # 'Ξ' - 207: 39, # 'Ο' - 208: 35, # 'Π' - 209: 48, # 'Ρ' - 210: 250, # None - 211: 37, # 'Σ' - 212: 33, # 'Τ' - 213: 45, # 'Υ' - 214: 56, # 'Φ' - 215: 50, # 'Χ' - 216: 84, # 'Ψ' - 217: 57, # 'Ω' - 218: 120, # 'Ϊ' - 219: 121, # 'Ϋ' - 220: 17, # 'ά' - 221: 18, # 'έ' - 222: 22, # 'ή' - 223: 15, # 'ί' - 224: 124, # 'ΰ' - 225: 1, # 'α' - 226: 29, # 'β' - 227: 20, # 'γ' - 228: 21, # 'δ' - 229: 3, # 'ε' - 230: 32, # 'ζ' - 231: 13, # 'η' - 232: 25, # 'θ' - 233: 5, # 'ι' - 234: 11, # 'κ' - 235: 16, # 'λ' - 236: 10, # 'μ' - 237: 6, # 'ν' - 238: 30, # 'ξ' - 239: 4, # 'ο' - 240: 9, # 'π' - 241: 8, # 'ρ' - 242: 14, # 'ς' - 243: 7, # 'σ' - 244: 2, # 'τ' - 245: 12, # 'υ' - 246: 28, # 'φ' - 247: 23, # 'χ' - 248: 42, # 'ψ' - 249: 24, # 'ω' - 250: 64, # 'ϊ' - 251: 75, # 'ϋ' - 252: 19, # 'ό' - 253: 26, # 'ύ' - 254: 27, # 'ώ' - 255: 253, # None + 0: 255, # '\x00' + 1: 255, # '\x01' + 2: 255, # '\x02' + 3: 255, # '\x03' + 4: 255, # '\x04' + 5: 255, # '\x05' + 6: 255, # '\x06' + 7: 255, # '\x07' + 8: 255, # '\x08' + 9: 255, # '\t' + 10: 254, # '\n' + 11: 
255, # '\x0b' + 12: 255, # '\x0c' + 13: 254, # '\r' + 14: 255, # '\x0e' + 15: 255, # '\x0f' + 16: 255, # '\x10' + 17: 255, # '\x11' + 18: 255, # '\x12' + 19: 255, # '\x13' + 20: 255, # '\x14' + 21: 255, # '\x15' + 22: 255, # '\x16' + 23: 255, # '\x17' + 24: 255, # '\x18' + 25: 255, # '\x19' + 26: 255, # '\x1a' + 27: 255, # '\x1b' + 28: 255, # '\x1c' + 29: 255, # '\x1d' + 30: 255, # '\x1e' + 31: 255, # '\x1f' + 32: 253, # ' ' + 33: 253, # '!' + 34: 253, # '"' + 35: 253, # '#' + 36: 253, # '$' + 37: 253, # '%' + 38: 253, # '&' + 39: 253, # "'" + 40: 253, # '(' + 41: 253, # ')' + 42: 253, # '*' + 43: 253, # '+' + 44: 253, # ',' + 45: 253, # '-' + 46: 253, # '.' + 47: 253, # '/' + 48: 252, # '0' + 49: 252, # '1' + 50: 252, # '2' + 51: 252, # '3' + 52: 252, # '4' + 53: 252, # '5' + 54: 252, # '6' + 55: 252, # '7' + 56: 252, # '8' + 57: 252, # '9' + 58: 253, # ':' + 59: 253, # ';' + 60: 253, # '<' + 61: 253, # '=' + 62: 253, # '>' + 63: 253, # '?' + 64: 253, # '@' + 65: 82, # 'A' + 66: 100, # 'B' + 67: 104, # 'C' + 68: 94, # 'D' + 69: 98, # 'E' + 70: 101, # 'F' + 71: 116, # 'G' + 72: 102, # 'H' + 73: 111, # 'I' + 74: 187, # 'J' + 75: 117, # 'K' + 76: 92, # 'L' + 77: 88, # 'M' + 78: 113, # 'N' + 79: 85, # 'O' + 80: 79, # 'P' + 81: 118, # 'Q' + 82: 105, # 'R' + 83: 83, # 'S' + 84: 67, # 'T' + 85: 114, # 'U' + 86: 119, # 'V' + 87: 95, # 'W' + 88: 99, # 'X' + 89: 109, # 'Y' + 90: 188, # 'Z' + 91: 253, # '[' + 92: 253, # '\\' + 93: 253, # ']' + 94: 253, # '^' + 95: 253, # '_' + 96: 253, # '`' + 97: 72, # 'a' + 98: 70, # 'b' + 99: 80, # 'c' + 100: 81, # 'd' + 101: 60, # 'e' + 102: 96, # 'f' + 103: 93, # 'g' + 104: 89, # 'h' + 105: 68, # 'i' + 106: 120, # 'j' + 107: 97, # 'k' + 108: 77, # 'l' + 109: 86, # 'm' + 110: 69, # 'n' + 111: 55, # 'o' + 112: 78, # 'p' + 113: 115, # 'q' + 114: 65, # 'r' + 115: 66, # 's' + 116: 58, # 't' + 117: 76, # 'u' + 118: 106, # 'v' + 119: 103, # 'w' + 120: 87, # 'x' + 121: 107, # 'y' + 122: 112, # 'z' + 123: 253, # '{' + 124: 253, # '|' + 125: 253, # '}' + 126: 253, # '~' + 127: 253, # '\x7f' + 128: 255, # '€' + 129: 255, # None + 130: 255, # '‚' + 131: 255, # 'ƒ' + 132: 255, # '„' + 133: 255, # '…' + 134: 255, # '†' + 135: 255, # '‡' + 136: 255, # None + 137: 255, # '‰' + 138: 255, # None + 139: 255, # '‹' + 140: 255, # None + 141: 255, # None + 142: 255, # None + 143: 255, # None + 144: 255, # None + 145: 255, # '‘' + 146: 255, # '’' + 147: 255, # '“' + 148: 255, # '”' + 149: 255, # '•' + 150: 255, # '–' + 151: 255, # '—' + 152: 255, # None + 153: 255, # '™' + 154: 255, # None + 155: 255, # '›' + 156: 255, # None + 157: 255, # None + 158: 255, # None + 159: 255, # None + 160: 253, # '\xa0' + 161: 233, # '΅' + 162: 61, # 'Ά' + 163: 253, # '£' + 164: 253, # '¤' + 165: 253, # '¥' + 166: 253, # '¦' + 167: 253, # '§' + 168: 253, # '¨' + 169: 253, # '©' + 170: 253, # None + 171: 253, # '«' + 172: 253, # '¬' + 173: 74, # '\xad' + 174: 253, # '®' + 175: 253, # '―' + 176: 253, # '°' + 177: 253, # '±' + 178: 253, # '²' + 179: 253, # '³' + 180: 247, # '΄' + 181: 253, # 'µ' + 182: 253, # '¶' + 183: 36, # '·' + 184: 46, # 'Έ' + 185: 71, # 'Ή' + 186: 73, # 'Ί' + 187: 253, # '»' + 188: 54, # 'Ό' + 189: 253, # '½' + 190: 108, # 'Ύ' + 191: 123, # 'Ώ' + 192: 110, # 'ΐ' + 193: 31, # 'Α' + 194: 51, # 'Β' + 195: 43, # 'Γ' + 196: 41, # 'Δ' + 197: 34, # 'Ε' + 198: 91, # 'Ζ' + 199: 40, # 'Η' + 200: 52, # 'Θ' + 201: 47, # 'Ι' + 202: 44, # 'Κ' + 203: 53, # 'Λ' + 204: 38, # 'Μ' + 205: 49, # 'Ν' + 206: 59, # 'Ξ' + 207: 39, # 'Ο' + 208: 35, # 'Π' + 209: 48, # 'Ρ' + 210: 250, # None + 211: 37, # 'Σ' 
+ 212: 33, # 'Τ' + 213: 45, # 'Υ' + 214: 56, # 'Φ' + 215: 50, # 'Χ' + 216: 84, # 'Ψ' + 217: 57, # 'Ω' + 218: 120, # 'Ϊ' + 219: 121, # 'Ϋ' + 220: 17, # 'ά' + 221: 18, # 'έ' + 222: 22, # 'ή' + 223: 15, # 'ί' + 224: 124, # 'ΰ' + 225: 1, # 'α' + 226: 29, # 'β' + 227: 20, # 'γ' + 228: 21, # 'δ' + 229: 3, # 'ε' + 230: 32, # 'ζ' + 231: 13, # 'η' + 232: 25, # 'θ' + 233: 5, # 'ι' + 234: 11, # 'κ' + 235: 16, # 'λ' + 236: 10, # 'μ' + 237: 6, # 'ν' + 238: 30, # 'ξ' + 239: 4, # 'ο' + 240: 9, # 'π' + 241: 8, # 'ρ' + 242: 14, # 'ς' + 243: 7, # 'σ' + 244: 2, # 'τ' + 245: 12, # 'υ' + 246: 28, # 'φ' + 247: 23, # 'χ' + 248: 42, # 'ψ' + 249: 24, # 'ω' + 250: 64, # 'ϊ' + 251: 75, # 'ϋ' + 252: 19, # 'ό' + 253: 26, # 'ύ' + 254: 27, # 'ώ' + 255: 253, # None } -WINDOWS_1253_GREEK_MODEL = SingleByteCharSetModel(charset_name='windows-1253', - language='Greek', - char_to_order_map=WINDOWS_1253_GREEK_CHAR_TO_ORDER, - language_model=GREEK_LANG_MODEL, - typical_positive_ratio=0.982851, - keep_ascii_letters=False, - alphabet='ΆΈΉΊΌΎΏΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩάέήίαβγδεζηθικλμνξοπρςστυφχψωόύώ') +WINDOWS_1253_GREEK_MODEL = SingleByteCharSetModel( + charset_name="windows-1253", + language="Greek", + char_to_order_map=WINDOWS_1253_GREEK_CHAR_TO_ORDER, + language_model=GREEK_LANG_MODEL, + typical_positive_ratio=0.982851, + keep_ascii_letters=False, + alphabet="ΆΈΉΊΌΎΏΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩάέήίαβγδεζηθικλμνξοπρςστυφχψωόύώ", +) ISO_8859_7_GREEK_CHAR_TO_ORDER = { - 0: 255, # '\x00' - 1: 255, # '\x01' - 2: 255, # '\x02' - 3: 255, # '\x03' - 4: 255, # '\x04' - 5: 255, # '\x05' - 6: 255, # '\x06' - 7: 255, # '\x07' - 8: 255, # '\x08' - 9: 255, # '\t' - 10: 254, # '\n' - 11: 255, # '\x0b' - 12: 255, # '\x0c' - 13: 254, # '\r' - 14: 255, # '\x0e' - 15: 255, # '\x0f' - 16: 255, # '\x10' - 17: 255, # '\x11' - 18: 255, # '\x12' - 19: 255, # '\x13' - 20: 255, # '\x14' - 21: 255, # '\x15' - 22: 255, # '\x16' - 23: 255, # '\x17' - 24: 255, # '\x18' - 25: 255, # '\x19' - 26: 255, # '\x1a' - 27: 255, # '\x1b' - 28: 255, # '\x1c' - 29: 255, # '\x1d' - 30: 255, # '\x1e' - 31: 255, # '\x1f' - 32: 253, # ' ' - 33: 253, # '!' - 34: 253, # '"' - 35: 253, # '#' - 36: 253, # '$' - 37: 253, # '%' - 38: 253, # '&' - 39: 253, # "'" - 40: 253, # '(' - 41: 253, # ')' - 42: 253, # '*' - 43: 253, # '+' - 44: 253, # ',' - 45: 253, # '-' - 46: 253, # '.' - 47: 253, # '/' - 48: 252, # '0' - 49: 252, # '1' - 50: 252, # '2' - 51: 252, # '3' - 52: 252, # '4' - 53: 252, # '5' - 54: 252, # '6' - 55: 252, # '7' - 56: 252, # '8' - 57: 252, # '9' - 58: 253, # ':' - 59: 253, # ';' - 60: 253, # '<' - 61: 253, # '=' - 62: 253, # '>' - 63: 253, # '?' 
- 64: 253, # '@' - 65: 82, # 'A' - 66: 100, # 'B' - 67: 104, # 'C' - 68: 94, # 'D' - 69: 98, # 'E' - 70: 101, # 'F' - 71: 116, # 'G' - 72: 102, # 'H' - 73: 111, # 'I' - 74: 187, # 'J' - 75: 117, # 'K' - 76: 92, # 'L' - 77: 88, # 'M' - 78: 113, # 'N' - 79: 85, # 'O' - 80: 79, # 'P' - 81: 118, # 'Q' - 82: 105, # 'R' - 83: 83, # 'S' - 84: 67, # 'T' - 85: 114, # 'U' - 86: 119, # 'V' - 87: 95, # 'W' - 88: 99, # 'X' - 89: 109, # 'Y' - 90: 188, # 'Z' - 91: 253, # '[' - 92: 253, # '\\' - 93: 253, # ']' - 94: 253, # '^' - 95: 253, # '_' - 96: 253, # '`' - 97: 72, # 'a' - 98: 70, # 'b' - 99: 80, # 'c' - 100: 81, # 'd' - 101: 60, # 'e' - 102: 96, # 'f' - 103: 93, # 'g' - 104: 89, # 'h' - 105: 68, # 'i' - 106: 120, # 'j' - 107: 97, # 'k' - 108: 77, # 'l' - 109: 86, # 'm' - 110: 69, # 'n' - 111: 55, # 'o' - 112: 78, # 'p' - 113: 115, # 'q' - 114: 65, # 'r' - 115: 66, # 's' - 116: 58, # 't' - 117: 76, # 'u' - 118: 106, # 'v' - 119: 103, # 'w' - 120: 87, # 'x' - 121: 107, # 'y' - 122: 112, # 'z' - 123: 253, # '{' - 124: 253, # '|' - 125: 253, # '}' - 126: 253, # '~' - 127: 253, # '\x7f' - 128: 255, # '\x80' - 129: 255, # '\x81' - 130: 255, # '\x82' - 131: 255, # '\x83' - 132: 255, # '\x84' - 133: 255, # '\x85' - 134: 255, # '\x86' - 135: 255, # '\x87' - 136: 255, # '\x88' - 137: 255, # '\x89' - 138: 255, # '\x8a' - 139: 255, # '\x8b' - 140: 255, # '\x8c' - 141: 255, # '\x8d' - 142: 255, # '\x8e' - 143: 255, # '\x8f' - 144: 255, # '\x90' - 145: 255, # '\x91' - 146: 255, # '\x92' - 147: 255, # '\x93' - 148: 255, # '\x94' - 149: 255, # '\x95' - 150: 255, # '\x96' - 151: 255, # '\x97' - 152: 255, # '\x98' - 153: 255, # '\x99' - 154: 255, # '\x9a' - 155: 255, # '\x9b' - 156: 255, # '\x9c' - 157: 255, # '\x9d' - 158: 255, # '\x9e' - 159: 255, # '\x9f' - 160: 253, # '\xa0' - 161: 233, # '‘' - 162: 90, # '’' - 163: 253, # '£' - 164: 253, # '€' - 165: 253, # '₯' - 166: 253, # '¦' - 167: 253, # '§' - 168: 253, # '¨' - 169: 253, # '©' - 170: 253, # 'ͺ' - 171: 253, # '«' - 172: 253, # '¬' - 173: 74, # '\xad' - 174: 253, # None - 175: 253, # '―' - 176: 253, # '°' - 177: 253, # '±' - 178: 253, # '²' - 179: 253, # '³' - 180: 247, # '΄' - 181: 248, # '΅' - 182: 61, # 'Ά' - 183: 36, # '·' - 184: 46, # 'Έ' - 185: 71, # 'Ή' - 186: 73, # 'Ί' - 187: 253, # '»' - 188: 54, # 'Ό' - 189: 253, # '½' - 190: 108, # 'Ύ' - 191: 123, # 'Ώ' - 192: 110, # 'ΐ' - 193: 31, # 'Α' - 194: 51, # 'Β' - 195: 43, # 'Γ' - 196: 41, # 'Δ' - 197: 34, # 'Ε' - 198: 91, # 'Ζ' - 199: 40, # 'Η' - 200: 52, # 'Θ' - 201: 47, # 'Ι' - 202: 44, # 'Κ' - 203: 53, # 'Λ' - 204: 38, # 'Μ' - 205: 49, # 'Ν' - 206: 59, # 'Ξ' - 207: 39, # 'Ο' - 208: 35, # 'Π' - 209: 48, # 'Ρ' - 210: 250, # None - 211: 37, # 'Σ' - 212: 33, # 'Τ' - 213: 45, # 'Υ' - 214: 56, # 'Φ' - 215: 50, # 'Χ' - 216: 84, # 'Ψ' - 217: 57, # 'Ω' - 218: 120, # 'Ϊ' - 219: 121, # 'Ϋ' - 220: 17, # 'ά' - 221: 18, # 'έ' - 222: 22, # 'ή' - 223: 15, # 'ί' - 224: 124, # 'ΰ' - 225: 1, # 'α' - 226: 29, # 'β' - 227: 20, # 'γ' - 228: 21, # 'δ' - 229: 3, # 'ε' - 230: 32, # 'ζ' - 231: 13, # 'η' - 232: 25, # 'θ' - 233: 5, # 'ι' - 234: 11, # 'κ' - 235: 16, # 'λ' - 236: 10, # 'μ' - 237: 6, # 'ν' - 238: 30, # 'ξ' - 239: 4, # 'ο' - 240: 9, # 'π' - 241: 8, # 'ρ' - 242: 14, # 'ς' - 243: 7, # 'σ' - 244: 2, # 'τ' - 245: 12, # 'υ' - 246: 28, # 'φ' - 247: 23, # 'χ' - 248: 42, # 'ψ' - 249: 24, # 'ω' - 250: 64, # 'ϊ' - 251: 75, # 'ϋ' - 252: 19, # 'ό' - 253: 26, # 'ύ' - 254: 27, # 'ώ' - 255: 253, # None + 0: 255, # '\x00' + 1: 255, # '\x01' + 2: 255, # '\x02' + 3: 255, # '\x03' + 4: 255, # '\x04' + 5: 255, # '\x05' + 6: 255, # 
'\x06' + 7: 255, # '\x07' + 8: 255, # '\x08' + 9: 255, # '\t' + 10: 254, # '\n' + 11: 255, # '\x0b' + 12: 255, # '\x0c' + 13: 254, # '\r' + 14: 255, # '\x0e' + 15: 255, # '\x0f' + 16: 255, # '\x10' + 17: 255, # '\x11' + 18: 255, # '\x12' + 19: 255, # '\x13' + 20: 255, # '\x14' + 21: 255, # '\x15' + 22: 255, # '\x16' + 23: 255, # '\x17' + 24: 255, # '\x18' + 25: 255, # '\x19' + 26: 255, # '\x1a' + 27: 255, # '\x1b' + 28: 255, # '\x1c' + 29: 255, # '\x1d' + 30: 255, # '\x1e' + 31: 255, # '\x1f' + 32: 253, # ' ' + 33: 253, # '!' + 34: 253, # '"' + 35: 253, # '#' + 36: 253, # '$' + 37: 253, # '%' + 38: 253, # '&' + 39: 253, # "'" + 40: 253, # '(' + 41: 253, # ')' + 42: 253, # '*' + 43: 253, # '+' + 44: 253, # ',' + 45: 253, # '-' + 46: 253, # '.' + 47: 253, # '/' + 48: 252, # '0' + 49: 252, # '1' + 50: 252, # '2' + 51: 252, # '3' + 52: 252, # '4' + 53: 252, # '5' + 54: 252, # '6' + 55: 252, # '7' + 56: 252, # '8' + 57: 252, # '9' + 58: 253, # ':' + 59: 253, # ';' + 60: 253, # '<' + 61: 253, # '=' + 62: 253, # '>' + 63: 253, # '?' + 64: 253, # '@' + 65: 82, # 'A' + 66: 100, # 'B' + 67: 104, # 'C' + 68: 94, # 'D' + 69: 98, # 'E' + 70: 101, # 'F' + 71: 116, # 'G' + 72: 102, # 'H' + 73: 111, # 'I' + 74: 187, # 'J' + 75: 117, # 'K' + 76: 92, # 'L' + 77: 88, # 'M' + 78: 113, # 'N' + 79: 85, # 'O' + 80: 79, # 'P' + 81: 118, # 'Q' + 82: 105, # 'R' + 83: 83, # 'S' + 84: 67, # 'T' + 85: 114, # 'U' + 86: 119, # 'V' + 87: 95, # 'W' + 88: 99, # 'X' + 89: 109, # 'Y' + 90: 188, # 'Z' + 91: 253, # '[' + 92: 253, # '\\' + 93: 253, # ']' + 94: 253, # '^' + 95: 253, # '_' + 96: 253, # '`' + 97: 72, # 'a' + 98: 70, # 'b' + 99: 80, # 'c' + 100: 81, # 'd' + 101: 60, # 'e' + 102: 96, # 'f' + 103: 93, # 'g' + 104: 89, # 'h' + 105: 68, # 'i' + 106: 120, # 'j' + 107: 97, # 'k' + 108: 77, # 'l' + 109: 86, # 'm' + 110: 69, # 'n' + 111: 55, # 'o' + 112: 78, # 'p' + 113: 115, # 'q' + 114: 65, # 'r' + 115: 66, # 's' + 116: 58, # 't' + 117: 76, # 'u' + 118: 106, # 'v' + 119: 103, # 'w' + 120: 87, # 'x' + 121: 107, # 'y' + 122: 112, # 'z' + 123: 253, # '{' + 124: 253, # '|' + 125: 253, # '}' + 126: 253, # '~' + 127: 253, # '\x7f' + 128: 255, # '\x80' + 129: 255, # '\x81' + 130: 255, # '\x82' + 131: 255, # '\x83' + 132: 255, # '\x84' + 133: 255, # '\x85' + 134: 255, # '\x86' + 135: 255, # '\x87' + 136: 255, # '\x88' + 137: 255, # '\x89' + 138: 255, # '\x8a' + 139: 255, # '\x8b' + 140: 255, # '\x8c' + 141: 255, # '\x8d' + 142: 255, # '\x8e' + 143: 255, # '\x8f' + 144: 255, # '\x90' + 145: 255, # '\x91' + 146: 255, # '\x92' + 147: 255, # '\x93' + 148: 255, # '\x94' + 149: 255, # '\x95' + 150: 255, # '\x96' + 151: 255, # '\x97' + 152: 255, # '\x98' + 153: 255, # '\x99' + 154: 255, # '\x9a' + 155: 255, # '\x9b' + 156: 255, # '\x9c' + 157: 255, # '\x9d' + 158: 255, # '\x9e' + 159: 255, # '\x9f' + 160: 253, # '\xa0' + 161: 233, # '‘' + 162: 90, # '’' + 163: 253, # '£' + 164: 253, # '€' + 165: 253, # '₯' + 166: 253, # '¦' + 167: 253, # '§' + 168: 253, # '¨' + 169: 253, # '©' + 170: 253, # 'ͺ' + 171: 253, # '«' + 172: 253, # '¬' + 173: 74, # '\xad' + 174: 253, # None + 175: 253, # '―' + 176: 253, # '°' + 177: 253, # '±' + 178: 253, # '²' + 179: 253, # '³' + 180: 247, # '΄' + 181: 248, # '΅' + 182: 61, # 'Ά' + 183: 36, # '·' + 184: 46, # 'Έ' + 185: 71, # 'Ή' + 186: 73, # 'Ί' + 187: 253, # '»' + 188: 54, # 'Ό' + 189: 253, # '½' + 190: 108, # 'Ύ' + 191: 123, # 'Ώ' + 192: 110, # 'ΐ' + 193: 31, # 'Α' + 194: 51, # 'Β' + 195: 43, # 'Γ' + 196: 41, # 'Δ' + 197: 34, # 'Ε' + 198: 91, # 'Ζ' + 199: 40, # 'Η' + 200: 52, # 'Θ' + 201: 47, # 'Ι' + 
202: 44, # 'Κ' + 203: 53, # 'Λ' + 204: 38, # 'Μ' + 205: 49, # 'Ν' + 206: 59, # 'Ξ' + 207: 39, # 'Ο' + 208: 35, # 'Π' + 209: 48, # 'Ρ' + 210: 250, # None + 211: 37, # 'Σ' + 212: 33, # 'Τ' + 213: 45, # 'Υ' + 214: 56, # 'Φ' + 215: 50, # 'Χ' + 216: 84, # 'Ψ' + 217: 57, # 'Ω' + 218: 120, # 'Ϊ' + 219: 121, # 'Ϋ' + 220: 17, # 'ά' + 221: 18, # 'έ' + 222: 22, # 'ή' + 223: 15, # 'ί' + 224: 124, # 'ΰ' + 225: 1, # 'α' + 226: 29, # 'β' + 227: 20, # 'γ' + 228: 21, # 'δ' + 229: 3, # 'ε' + 230: 32, # 'ζ' + 231: 13, # 'η' + 232: 25, # 'θ' + 233: 5, # 'ι' + 234: 11, # 'κ' + 235: 16, # 'λ' + 236: 10, # 'μ' + 237: 6, # 'ν' + 238: 30, # 'ξ' + 239: 4, # 'ο' + 240: 9, # 'π' + 241: 8, # 'ρ' + 242: 14, # 'ς' + 243: 7, # 'σ' + 244: 2, # 'τ' + 245: 12, # 'υ' + 246: 28, # 'φ' + 247: 23, # 'χ' + 248: 42, # 'ψ' + 249: 24, # 'ω' + 250: 64, # 'ϊ' + 251: 75, # 'ϋ' + 252: 19, # 'ό' + 253: 26, # 'ύ' + 254: 27, # 'ώ' + 255: 253, # None } -ISO_8859_7_GREEK_MODEL = SingleByteCharSetModel(charset_name='ISO-8859-7', - language='Greek', - char_to_order_map=ISO_8859_7_GREEK_CHAR_TO_ORDER, - language_model=GREEK_LANG_MODEL, - typical_positive_ratio=0.982851, - keep_ascii_letters=False, - alphabet='ΆΈΉΊΌΎΏΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩάέήίαβγδεζηθικλμνξοπρςστυφχψωόύώ') - +ISO_8859_7_GREEK_MODEL = SingleByteCharSetModel( + charset_name="ISO-8859-7", + language="Greek", + char_to_order_map=ISO_8859_7_GREEK_CHAR_TO_ORDER, + language_model=GREEK_LANG_MODEL, + typical_positive_ratio=0.982851, + keep_ascii_letters=False, + alphabet="ΆΈΉΊΌΎΏΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩάέήίαβγδεζηθικλμνξοπρςστυφχψωόύώ", +) diff --git a/env/Lib/site-packages/pip/_vendor/chardet/langhebrewmodel.py b/env/Lib/site-packages/pip/_vendor/chardet/langhebrewmodel.py index 484c652a48e0c97826acabfc475d1bab7ea4763a..56d2975877f092ac62ad403803f6456858affcba 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/langhebrewmodel.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/langhebrewmodel.py @@ -1,9 +1,5 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - from pip._vendor.chardet.sbcharsetprober import SingleByteCharSetModel - # 3: Positive # 2: Likely # 1: Unlikely @@ -4115,269 +4111,270 @@ HEBREW_LANG_MODEL = { # Character Mapping Table(s): WINDOWS_1255_HEBREW_CHAR_TO_ORDER = { - 0: 255, # '\x00' - 1: 255, # '\x01' - 2: 255, # '\x02' - 3: 255, # '\x03' - 4: 255, # '\x04' - 5: 255, # '\x05' - 6: 255, # '\x06' - 7: 255, # '\x07' - 8: 255, # '\x08' - 9: 255, # '\t' - 10: 254, # '\n' - 11: 255, # '\x0b' - 12: 255, # '\x0c' - 13: 254, # '\r' - 14: 255, # '\x0e' - 15: 255, # '\x0f' - 16: 255, # '\x10' - 17: 255, # '\x11' - 18: 255, # '\x12' - 19: 255, # '\x13' - 20: 255, # '\x14' - 21: 255, # '\x15' - 22: 255, # '\x16' - 23: 255, # '\x17' - 24: 255, # '\x18' - 25: 255, # '\x19' - 26: 255, # '\x1a' - 27: 255, # '\x1b' - 28: 255, # '\x1c' - 29: 255, # '\x1d' - 30: 255, # '\x1e' - 31: 255, # '\x1f' - 32: 253, # ' ' - 33: 253, # '!' - 34: 253, # '"' - 35: 253, # '#' - 36: 253, # '$' - 37: 253, # '%' - 38: 253, # '&' - 39: 253, # "'" - 40: 253, # '(' - 41: 253, # ')' - 42: 253, # '*' - 43: 253, # '+' - 44: 253, # ',' - 45: 253, # '-' - 46: 253, # '.' - 47: 253, # '/' - 48: 252, # '0' - 49: 252, # '1' - 50: 252, # '2' - 51: 252, # '3' - 52: 252, # '4' - 53: 252, # '5' - 54: 252, # '6' - 55: 252, # '7' - 56: 252, # '8' - 57: 252, # '9' - 58: 253, # ':' - 59: 253, # ';' - 60: 253, # '<' - 61: 253, # '=' - 62: 253, # '>' - 63: 253, # '?' 
- 64: 253, # '@' - 65: 69, # 'A' - 66: 91, # 'B' - 67: 79, # 'C' - 68: 80, # 'D' - 69: 92, # 'E' - 70: 89, # 'F' - 71: 97, # 'G' - 72: 90, # 'H' - 73: 68, # 'I' - 74: 111, # 'J' - 75: 112, # 'K' - 76: 82, # 'L' - 77: 73, # 'M' - 78: 95, # 'N' - 79: 85, # 'O' - 80: 78, # 'P' - 81: 121, # 'Q' - 82: 86, # 'R' - 83: 71, # 'S' - 84: 67, # 'T' - 85: 102, # 'U' - 86: 107, # 'V' - 87: 84, # 'W' - 88: 114, # 'X' - 89: 103, # 'Y' - 90: 115, # 'Z' - 91: 253, # '[' - 92: 253, # '\\' - 93: 253, # ']' - 94: 253, # '^' - 95: 253, # '_' - 96: 253, # '`' - 97: 50, # 'a' - 98: 74, # 'b' - 99: 60, # 'c' - 100: 61, # 'd' - 101: 42, # 'e' - 102: 76, # 'f' - 103: 70, # 'g' - 104: 64, # 'h' - 105: 53, # 'i' - 106: 105, # 'j' - 107: 93, # 'k' - 108: 56, # 'l' - 109: 65, # 'm' - 110: 54, # 'n' - 111: 49, # 'o' - 112: 66, # 'p' - 113: 110, # 'q' - 114: 51, # 'r' - 115: 43, # 's' - 116: 44, # 't' - 117: 63, # 'u' - 118: 81, # 'v' - 119: 77, # 'w' - 120: 98, # 'x' - 121: 75, # 'y' - 122: 108, # 'z' - 123: 253, # '{' - 124: 253, # '|' - 125: 253, # '}' - 126: 253, # '~' - 127: 253, # '\x7f' - 128: 124, # '€' - 129: 202, # None - 130: 203, # '‚' - 131: 204, # 'ƒ' - 132: 205, # '„' - 133: 40, # '…' - 134: 58, # '†' - 135: 206, # '‡' - 136: 207, # 'ˆ' - 137: 208, # '‰' - 138: 209, # None - 139: 210, # '‹' - 140: 211, # None - 141: 212, # None - 142: 213, # None - 143: 214, # None - 144: 215, # None - 145: 83, # '‘' - 146: 52, # '’' - 147: 47, # '“' - 148: 46, # '”' - 149: 72, # '•' - 150: 32, # '–' - 151: 94, # '—' - 152: 216, # '˜' - 153: 113, # '™' - 154: 217, # None - 155: 109, # '›' - 156: 218, # None - 157: 219, # None - 158: 220, # None - 159: 221, # None - 160: 34, # '\xa0' - 161: 116, # '¡' - 162: 222, # '¢' - 163: 118, # '£' - 164: 100, # '₪' - 165: 223, # '¥' - 166: 224, # '¦' - 167: 117, # '§' - 168: 119, # '¨' - 169: 104, # '©' - 170: 125, # '×' - 171: 225, # '«' - 172: 226, # '¬' - 173: 87, # '\xad' - 174: 99, # '®' - 175: 227, # '¯' - 176: 106, # '°' - 177: 122, # '±' - 178: 123, # '²' - 179: 228, # '³' - 180: 55, # '´' - 181: 229, # 'µ' - 182: 230, # '¶' - 183: 101, # '·' - 184: 231, # '¸' - 185: 232, # '¹' - 186: 120, # '÷' - 187: 233, # '»' - 188: 48, # '¼' - 189: 39, # '½' - 190: 57, # '¾' - 191: 234, # '¿' - 192: 30, # 'ְ' - 193: 59, # 'ֱ' - 194: 41, # 'ֲ' - 195: 88, # 'ֳ' - 196: 33, # 'ִ' - 197: 37, # 'ֵ' - 198: 36, # 'ֶ' - 199: 31, # 'ַ' - 200: 29, # 'ָ' - 201: 35, # 'ֹ' - 202: 235, # None - 203: 62, # 'ֻ' - 204: 28, # 'ּ' - 205: 236, # 'ֽ' - 206: 126, # '־' - 207: 237, # 'ֿ' - 208: 238, # '׀' - 209: 38, # 'ׁ' - 210: 45, # 'ׂ' - 211: 239, # '׃' - 212: 240, # 'װ' - 213: 241, # 'ױ' - 214: 242, # 'ײ' - 215: 243, # '׳' - 216: 127, # '״' - 217: 244, # None - 218: 245, # None - 219: 246, # None - 220: 247, # None - 221: 248, # None - 222: 249, # None - 223: 250, # None - 224: 9, # 'א' - 225: 8, # 'ב' - 226: 20, # 'ג' - 227: 16, # 'ד' - 228: 3, # 'ה' - 229: 2, # 'ו' - 230: 24, # 'ז' - 231: 14, # 'ח' - 232: 22, # 'ט' - 233: 1, # 'י' - 234: 25, # 'ך' - 235: 15, # 'כ' - 236: 4, # 'ל' - 237: 11, # 'ם' - 238: 6, # 'מ' - 239: 23, # 'ן' - 240: 12, # 'נ' - 241: 19, # 'ס' - 242: 13, # 'ע' - 243: 26, # 'ף' - 244: 18, # 'פ' - 245: 27, # 'ץ' - 246: 21, # 'צ' - 247: 17, # 'ק' - 248: 7, # 'ר' - 249: 10, # 'ש' - 250: 5, # 'ת' - 251: 251, # None - 252: 252, # None - 253: 128, # '\u200e' - 254: 96, # '\u200f' - 255: 253, # None + 0: 255, # '\x00' + 1: 255, # '\x01' + 2: 255, # '\x02' + 3: 255, # '\x03' + 4: 255, # '\x04' + 5: 255, # '\x05' + 6: 255, # '\x06' + 7: 255, # '\x07' + 8: 255, # '\x08' + 9: 255, # '\t' + 10: 254, 
# '\n' + 11: 255, # '\x0b' + 12: 255, # '\x0c' + 13: 254, # '\r' + 14: 255, # '\x0e' + 15: 255, # '\x0f' + 16: 255, # '\x10' + 17: 255, # '\x11' + 18: 255, # '\x12' + 19: 255, # '\x13' + 20: 255, # '\x14' + 21: 255, # '\x15' + 22: 255, # '\x16' + 23: 255, # '\x17' + 24: 255, # '\x18' + 25: 255, # '\x19' + 26: 255, # '\x1a' + 27: 255, # '\x1b' + 28: 255, # '\x1c' + 29: 255, # '\x1d' + 30: 255, # '\x1e' + 31: 255, # '\x1f' + 32: 253, # ' ' + 33: 253, # '!' + 34: 253, # '"' + 35: 253, # '#' + 36: 253, # '$' + 37: 253, # '%' + 38: 253, # '&' + 39: 253, # "'" + 40: 253, # '(' + 41: 253, # ')' + 42: 253, # '*' + 43: 253, # '+' + 44: 253, # ',' + 45: 253, # '-' + 46: 253, # '.' + 47: 253, # '/' + 48: 252, # '0' + 49: 252, # '1' + 50: 252, # '2' + 51: 252, # '3' + 52: 252, # '4' + 53: 252, # '5' + 54: 252, # '6' + 55: 252, # '7' + 56: 252, # '8' + 57: 252, # '9' + 58: 253, # ':' + 59: 253, # ';' + 60: 253, # '<' + 61: 253, # '=' + 62: 253, # '>' + 63: 253, # '?' + 64: 253, # '@' + 65: 69, # 'A' + 66: 91, # 'B' + 67: 79, # 'C' + 68: 80, # 'D' + 69: 92, # 'E' + 70: 89, # 'F' + 71: 97, # 'G' + 72: 90, # 'H' + 73: 68, # 'I' + 74: 111, # 'J' + 75: 112, # 'K' + 76: 82, # 'L' + 77: 73, # 'M' + 78: 95, # 'N' + 79: 85, # 'O' + 80: 78, # 'P' + 81: 121, # 'Q' + 82: 86, # 'R' + 83: 71, # 'S' + 84: 67, # 'T' + 85: 102, # 'U' + 86: 107, # 'V' + 87: 84, # 'W' + 88: 114, # 'X' + 89: 103, # 'Y' + 90: 115, # 'Z' + 91: 253, # '[' + 92: 253, # '\\' + 93: 253, # ']' + 94: 253, # '^' + 95: 253, # '_' + 96: 253, # '`' + 97: 50, # 'a' + 98: 74, # 'b' + 99: 60, # 'c' + 100: 61, # 'd' + 101: 42, # 'e' + 102: 76, # 'f' + 103: 70, # 'g' + 104: 64, # 'h' + 105: 53, # 'i' + 106: 105, # 'j' + 107: 93, # 'k' + 108: 56, # 'l' + 109: 65, # 'm' + 110: 54, # 'n' + 111: 49, # 'o' + 112: 66, # 'p' + 113: 110, # 'q' + 114: 51, # 'r' + 115: 43, # 's' + 116: 44, # 't' + 117: 63, # 'u' + 118: 81, # 'v' + 119: 77, # 'w' + 120: 98, # 'x' + 121: 75, # 'y' + 122: 108, # 'z' + 123: 253, # '{' + 124: 253, # '|' + 125: 253, # '}' + 126: 253, # '~' + 127: 253, # '\x7f' + 128: 124, # '€' + 129: 202, # None + 130: 203, # '‚' + 131: 204, # 'ƒ' + 132: 205, # '„' + 133: 40, # '…' + 134: 58, # '†' + 135: 206, # '‡' + 136: 207, # 'ˆ' + 137: 208, # '‰' + 138: 209, # None + 139: 210, # '‹' + 140: 211, # None + 141: 212, # None + 142: 213, # None + 143: 214, # None + 144: 215, # None + 145: 83, # '‘' + 146: 52, # '’' + 147: 47, # '“' + 148: 46, # '”' + 149: 72, # '•' + 150: 32, # '–' + 151: 94, # '—' + 152: 216, # '˜' + 153: 113, # '™' + 154: 217, # None + 155: 109, # '›' + 156: 218, # None + 157: 219, # None + 158: 220, # None + 159: 221, # None + 160: 34, # '\xa0' + 161: 116, # '¡' + 162: 222, # '¢' + 163: 118, # '£' + 164: 100, # '₪' + 165: 223, # '¥' + 166: 224, # '¦' + 167: 117, # '§' + 168: 119, # '¨' + 169: 104, # '©' + 170: 125, # '×' + 171: 225, # '«' + 172: 226, # '¬' + 173: 87, # '\xad' + 174: 99, # '®' + 175: 227, # '¯' + 176: 106, # '°' + 177: 122, # '±' + 178: 123, # '²' + 179: 228, # '³' + 180: 55, # '´' + 181: 229, # 'µ' + 182: 230, # '¶' + 183: 101, # '·' + 184: 231, # '¸' + 185: 232, # '¹' + 186: 120, # '÷' + 187: 233, # '»' + 188: 48, # '¼' + 189: 39, # '½' + 190: 57, # '¾' + 191: 234, # '¿' + 192: 30, # 'ְ' + 193: 59, # 'ֱ' + 194: 41, # 'ֲ' + 195: 88, # 'ֳ' + 196: 33, # 'ִ' + 197: 37, # 'ֵ' + 198: 36, # 'ֶ' + 199: 31, # 'ַ' + 200: 29, # 'ָ' + 201: 35, # 'ֹ' + 202: 235, # None + 203: 62, # 'ֻ' + 204: 28, # 'ּ' + 205: 236, # 'ֽ' + 206: 126, # '־' + 207: 237, # 'ֿ' + 208: 238, # '׀' + 209: 38, # 'ׁ' + 210: 45, # 'ׂ' + 211: 239, # '׃' + 
212: 240, # 'װ' + 213: 241, # 'ױ' + 214: 242, # 'ײ' + 215: 243, # '׳' + 216: 127, # '״' + 217: 244, # None + 218: 245, # None + 219: 246, # None + 220: 247, # None + 221: 248, # None + 222: 249, # None + 223: 250, # None + 224: 9, # 'א' + 225: 8, # 'ב' + 226: 20, # 'ג' + 227: 16, # 'ד' + 228: 3, # 'ה' + 229: 2, # 'ו' + 230: 24, # 'ז' + 231: 14, # 'ח' + 232: 22, # 'ט' + 233: 1, # 'י' + 234: 25, # 'ך' + 235: 15, # 'כ' + 236: 4, # 'ל' + 237: 11, # 'ם' + 238: 6, # 'מ' + 239: 23, # 'ן' + 240: 12, # 'נ' + 241: 19, # 'ס' + 242: 13, # 'ע' + 243: 26, # 'ף' + 244: 18, # 'פ' + 245: 27, # 'ץ' + 246: 21, # 'צ' + 247: 17, # 'ק' + 248: 7, # 'ר' + 249: 10, # 'ש' + 250: 5, # 'ת' + 251: 251, # None + 252: 252, # None + 253: 128, # '\u200e' + 254: 96, # '\u200f' + 255: 253, # None } -WINDOWS_1255_HEBREW_MODEL = SingleByteCharSetModel(charset_name='windows-1255', - language='Hebrew', - char_to_order_map=WINDOWS_1255_HEBREW_CHAR_TO_ORDER, - language_model=HEBREW_LANG_MODEL, - typical_positive_ratio=0.984004, - keep_ascii_letters=False, - alphabet='אבגדהוזחטיךכלםמןנסעףפץצקרשתװױײ') - +WINDOWS_1255_HEBREW_MODEL = SingleByteCharSetModel( + charset_name="windows-1255", + language="Hebrew", + char_to_order_map=WINDOWS_1255_HEBREW_CHAR_TO_ORDER, + language_model=HEBREW_LANG_MODEL, + typical_positive_ratio=0.984004, + keep_ascii_letters=False, + alphabet="אבגדהוזחטיךכלםמןנסעףפץצקרשתװױײ", +) diff --git a/env/Lib/site-packages/pip/_vendor/chardet/langhungarianmodel.py b/env/Lib/site-packages/pip/_vendor/chardet/langhungarianmodel.py index bbc5cda6441fc696beea666edd8ad6818ef7ea95..09a0d326b983b59b58f84b00e55fbe6909a23793 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/langhungarianmodel.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/langhungarianmodel.py @@ -1,9 +1,5 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - from pip._vendor.chardet.sbcharsetprober import SingleByteCharSetModel - # 3: Positive # 2: Likely # 1: Unlikely @@ -4115,536 +4111,539 @@ HUNGARIAN_LANG_MODEL = { # Character Mapping Table(s): WINDOWS_1250_HUNGARIAN_CHAR_TO_ORDER = { - 0: 255, # '\x00' - 1: 255, # '\x01' - 2: 255, # '\x02' - 3: 255, # '\x03' - 4: 255, # '\x04' - 5: 255, # '\x05' - 6: 255, # '\x06' - 7: 255, # '\x07' - 8: 255, # '\x08' - 9: 255, # '\t' - 10: 254, # '\n' - 11: 255, # '\x0b' - 12: 255, # '\x0c' - 13: 254, # '\r' - 14: 255, # '\x0e' - 15: 255, # '\x0f' - 16: 255, # '\x10' - 17: 255, # '\x11' - 18: 255, # '\x12' - 19: 255, # '\x13' - 20: 255, # '\x14' - 21: 255, # '\x15' - 22: 255, # '\x16' - 23: 255, # '\x17' - 24: 255, # '\x18' - 25: 255, # '\x19' - 26: 255, # '\x1a' - 27: 255, # '\x1b' - 28: 255, # '\x1c' - 29: 255, # '\x1d' - 30: 255, # '\x1e' - 31: 255, # '\x1f' - 32: 253, # ' ' - 33: 253, # '!' - 34: 253, # '"' - 35: 253, # '#' - 36: 253, # '$' - 37: 253, # '%' - 38: 253, # '&' - 39: 253, # "'" - 40: 253, # '(' - 41: 253, # ')' - 42: 253, # '*' - 43: 253, # '+' - 44: 253, # ',' - 45: 253, # '-' - 46: 253, # '.' - 47: 253, # '/' - 48: 252, # '0' - 49: 252, # '1' - 50: 252, # '2' - 51: 252, # '3' - 52: 252, # '4' - 53: 252, # '5' - 54: 252, # '6' - 55: 252, # '7' - 56: 252, # '8' - 57: 252, # '9' - 58: 253, # ':' - 59: 253, # ';' - 60: 253, # '<' - 61: 253, # '=' - 62: 253, # '>' - 63: 253, # '?' 
- 64: 253, # '@' - 65: 28, # 'A' - 66: 40, # 'B' - 67: 54, # 'C' - 68: 45, # 'D' - 69: 32, # 'E' - 70: 50, # 'F' - 71: 49, # 'G' - 72: 38, # 'H' - 73: 39, # 'I' - 74: 53, # 'J' - 75: 36, # 'K' - 76: 41, # 'L' - 77: 34, # 'M' - 78: 35, # 'N' - 79: 47, # 'O' - 80: 46, # 'P' - 81: 72, # 'Q' - 82: 43, # 'R' - 83: 33, # 'S' - 84: 37, # 'T' - 85: 57, # 'U' - 86: 48, # 'V' - 87: 64, # 'W' - 88: 68, # 'X' - 89: 55, # 'Y' - 90: 52, # 'Z' - 91: 253, # '[' - 92: 253, # '\\' - 93: 253, # ']' - 94: 253, # '^' - 95: 253, # '_' - 96: 253, # '`' - 97: 2, # 'a' - 98: 18, # 'b' - 99: 26, # 'c' - 100: 17, # 'd' - 101: 1, # 'e' - 102: 27, # 'f' - 103: 12, # 'g' - 104: 20, # 'h' - 105: 9, # 'i' - 106: 22, # 'j' - 107: 7, # 'k' - 108: 6, # 'l' - 109: 13, # 'm' - 110: 4, # 'n' - 111: 8, # 'o' - 112: 23, # 'p' - 113: 67, # 'q' - 114: 10, # 'r' - 115: 5, # 's' - 116: 3, # 't' - 117: 21, # 'u' - 118: 19, # 'v' - 119: 65, # 'w' - 120: 62, # 'x' - 121: 16, # 'y' - 122: 11, # 'z' - 123: 253, # '{' - 124: 253, # '|' - 125: 253, # '}' - 126: 253, # '~' - 127: 253, # '\x7f' - 128: 161, # '€' - 129: 162, # None - 130: 163, # '‚' - 131: 164, # None - 132: 165, # '„' - 133: 166, # '…' - 134: 167, # '†' - 135: 168, # '‡' - 136: 169, # None - 137: 170, # '‰' - 138: 171, # 'Š' - 139: 172, # '‹' - 140: 173, # 'Ś' - 141: 174, # 'Ť' - 142: 175, # 'Ž' - 143: 176, # 'Ź' - 144: 177, # None - 145: 178, # '‘' - 146: 179, # '’' - 147: 180, # '“' - 148: 78, # '”' - 149: 181, # '•' - 150: 69, # '–' - 151: 182, # '—' - 152: 183, # None - 153: 184, # '™' - 154: 185, # 'š' - 155: 186, # '›' - 156: 187, # 'ś' - 157: 188, # 'ť' - 158: 189, # 'ž' - 159: 190, # 'ź' - 160: 191, # '\xa0' - 161: 192, # 'ˇ' - 162: 193, # '˘' - 163: 194, # 'Ł' - 164: 195, # '¤' - 165: 196, # 'Ą' - 166: 197, # '¦' - 167: 76, # '§' - 168: 198, # '¨' - 169: 199, # '©' - 170: 200, # 'Ş' - 171: 201, # '«' - 172: 202, # '¬' - 173: 203, # '\xad' - 174: 204, # '®' - 175: 205, # 'Ż' - 176: 81, # '°' - 177: 206, # '±' - 178: 207, # '˛' - 179: 208, # 'ł' - 180: 209, # '´' - 181: 210, # 'µ' - 182: 211, # '¶' - 183: 212, # '·' - 184: 213, # '¸' - 185: 214, # 'ą' - 186: 215, # 'ş' - 187: 216, # '»' - 188: 217, # 'Ľ' - 189: 218, # '˝' - 190: 219, # 'ľ' - 191: 220, # 'ż' - 192: 221, # 'Ŕ' - 193: 51, # 'Á' - 194: 83, # 'Â' - 195: 222, # 'Ă' - 196: 80, # 'Ä' - 197: 223, # 'Ĺ' - 198: 224, # 'Ć' - 199: 225, # 'Ç' - 200: 226, # 'Č' - 201: 44, # 'É' - 202: 227, # 'Ę' - 203: 228, # 'Ë' - 204: 229, # 'Ě' - 205: 61, # 'Í' - 206: 230, # 'Î' - 207: 231, # 'Ď' - 208: 232, # 'Đ' - 209: 233, # 'Ń' - 210: 234, # 'Ň' - 211: 58, # 'Ó' - 212: 235, # 'Ô' - 213: 66, # 'Ő' - 214: 59, # 'Ö' - 215: 236, # '×' - 216: 237, # 'Ř' - 217: 238, # 'Ů' - 218: 60, # 'Ú' - 219: 70, # 'Ű' - 220: 63, # 'Ü' - 221: 239, # 'Ý' - 222: 240, # 'Ţ' - 223: 241, # 'ß' - 224: 84, # 'ŕ' - 225: 14, # 'á' - 226: 75, # 'â' - 227: 242, # 'ă' - 228: 71, # 'ä' - 229: 82, # 'ĺ' - 230: 243, # 'ć' - 231: 73, # 'ç' - 232: 244, # 'č' - 233: 15, # 'é' - 234: 85, # 'ę' - 235: 79, # 'ë' - 236: 86, # 'ě' - 237: 30, # 'í' - 238: 77, # 'î' - 239: 87, # 'ď' - 240: 245, # 'đ' - 241: 246, # 'ń' - 242: 247, # 'ň' - 243: 25, # 'ó' - 244: 74, # 'ô' - 245: 42, # 'ő' - 246: 24, # 'ö' - 247: 248, # '÷' - 248: 249, # 'ř' - 249: 250, # 'ů' - 250: 31, # 'ú' - 251: 56, # 'ű' - 252: 29, # 'ü' - 253: 251, # 'ý' - 254: 252, # 'ţ' - 255: 253, # '˙' + 0: 255, # '\x00' + 1: 255, # '\x01' + 2: 255, # '\x02' + 3: 255, # '\x03' + 4: 255, # '\x04' + 5: 255, # '\x05' + 6: 255, # '\x06' + 7: 255, # '\x07' + 8: 255, # '\x08' + 9: 255, # '\t' + 10: 254, # '\n' + 11: 
255, # '\x0b' + 12: 255, # '\x0c' + 13: 254, # '\r' + 14: 255, # '\x0e' + 15: 255, # '\x0f' + 16: 255, # '\x10' + 17: 255, # '\x11' + 18: 255, # '\x12' + 19: 255, # '\x13' + 20: 255, # '\x14' + 21: 255, # '\x15' + 22: 255, # '\x16' + 23: 255, # '\x17' + 24: 255, # '\x18' + 25: 255, # '\x19' + 26: 255, # '\x1a' + 27: 255, # '\x1b' + 28: 255, # '\x1c' + 29: 255, # '\x1d' + 30: 255, # '\x1e' + 31: 255, # '\x1f' + 32: 253, # ' ' + 33: 253, # '!' + 34: 253, # '"' + 35: 253, # '#' + 36: 253, # '$' + 37: 253, # '%' + 38: 253, # '&' + 39: 253, # "'" + 40: 253, # '(' + 41: 253, # ')' + 42: 253, # '*' + 43: 253, # '+' + 44: 253, # ',' + 45: 253, # '-' + 46: 253, # '.' + 47: 253, # '/' + 48: 252, # '0' + 49: 252, # '1' + 50: 252, # '2' + 51: 252, # '3' + 52: 252, # '4' + 53: 252, # '5' + 54: 252, # '6' + 55: 252, # '7' + 56: 252, # '8' + 57: 252, # '9' + 58: 253, # ':' + 59: 253, # ';' + 60: 253, # '<' + 61: 253, # '=' + 62: 253, # '>' + 63: 253, # '?' + 64: 253, # '@' + 65: 28, # 'A' + 66: 40, # 'B' + 67: 54, # 'C' + 68: 45, # 'D' + 69: 32, # 'E' + 70: 50, # 'F' + 71: 49, # 'G' + 72: 38, # 'H' + 73: 39, # 'I' + 74: 53, # 'J' + 75: 36, # 'K' + 76: 41, # 'L' + 77: 34, # 'M' + 78: 35, # 'N' + 79: 47, # 'O' + 80: 46, # 'P' + 81: 72, # 'Q' + 82: 43, # 'R' + 83: 33, # 'S' + 84: 37, # 'T' + 85: 57, # 'U' + 86: 48, # 'V' + 87: 64, # 'W' + 88: 68, # 'X' + 89: 55, # 'Y' + 90: 52, # 'Z' + 91: 253, # '[' + 92: 253, # '\\' + 93: 253, # ']' + 94: 253, # '^' + 95: 253, # '_' + 96: 253, # '`' + 97: 2, # 'a' + 98: 18, # 'b' + 99: 26, # 'c' + 100: 17, # 'd' + 101: 1, # 'e' + 102: 27, # 'f' + 103: 12, # 'g' + 104: 20, # 'h' + 105: 9, # 'i' + 106: 22, # 'j' + 107: 7, # 'k' + 108: 6, # 'l' + 109: 13, # 'm' + 110: 4, # 'n' + 111: 8, # 'o' + 112: 23, # 'p' + 113: 67, # 'q' + 114: 10, # 'r' + 115: 5, # 's' + 116: 3, # 't' + 117: 21, # 'u' + 118: 19, # 'v' + 119: 65, # 'w' + 120: 62, # 'x' + 121: 16, # 'y' + 122: 11, # 'z' + 123: 253, # '{' + 124: 253, # '|' + 125: 253, # '}' + 126: 253, # '~' + 127: 253, # '\x7f' + 128: 161, # '€' + 129: 162, # None + 130: 163, # '‚' + 131: 164, # None + 132: 165, # '„' + 133: 166, # '…' + 134: 167, # '†' + 135: 168, # '‡' + 136: 169, # None + 137: 170, # '‰' + 138: 171, # 'Š' + 139: 172, # '‹' + 140: 173, # 'Ś' + 141: 174, # 'Ť' + 142: 175, # 'Ž' + 143: 176, # 'Ź' + 144: 177, # None + 145: 178, # '‘' + 146: 179, # '’' + 147: 180, # '“' + 148: 78, # '”' + 149: 181, # '•' + 150: 69, # '–' + 151: 182, # '—' + 152: 183, # None + 153: 184, # '™' + 154: 185, # 'š' + 155: 186, # '›' + 156: 187, # 'ś' + 157: 188, # 'ť' + 158: 189, # 'ž' + 159: 190, # 'ź' + 160: 191, # '\xa0' + 161: 192, # 'ˇ' + 162: 193, # '˘' + 163: 194, # 'Ł' + 164: 195, # '¤' + 165: 196, # 'Ą' + 166: 197, # '¦' + 167: 76, # '§' + 168: 198, # '¨' + 169: 199, # '©' + 170: 200, # 'Ş' + 171: 201, # '«' + 172: 202, # '¬' + 173: 203, # '\xad' + 174: 204, # '®' + 175: 205, # 'Ż' + 176: 81, # '°' + 177: 206, # '±' + 178: 207, # '˛' + 179: 208, # 'ł' + 180: 209, # '´' + 181: 210, # 'µ' + 182: 211, # '¶' + 183: 212, # '·' + 184: 213, # '¸' + 185: 214, # 'ą' + 186: 215, # 'ş' + 187: 216, # '»' + 188: 217, # 'Ľ' + 189: 218, # '˝' + 190: 219, # 'ľ' + 191: 220, # 'ż' + 192: 221, # 'Ŕ' + 193: 51, # 'Á' + 194: 83, # 'Â' + 195: 222, # 'Ă' + 196: 80, # 'Ä' + 197: 223, # 'Ĺ' + 198: 224, # 'Ć' + 199: 225, # 'Ç' + 200: 226, # 'Č' + 201: 44, # 'É' + 202: 227, # 'Ę' + 203: 228, # 'Ë' + 204: 229, # 'Ě' + 205: 61, # 'Í' + 206: 230, # 'Î' + 207: 231, # 'Ď' + 208: 232, # 'Đ' + 209: 233, # 'Ń' + 210: 234, # 'Ň' + 211: 58, # 'Ó' + 212: 235, # 'Ô' + 213: 
66, # 'Ő' + 214: 59, # 'Ö' + 215: 236, # '×' + 216: 237, # 'Ř' + 217: 238, # 'Ů' + 218: 60, # 'Ú' + 219: 70, # 'Ű' + 220: 63, # 'Ü' + 221: 239, # 'Ý' + 222: 240, # 'Ţ' + 223: 241, # 'ß' + 224: 84, # 'ŕ' + 225: 14, # 'á' + 226: 75, # 'â' + 227: 242, # 'ă' + 228: 71, # 'ä' + 229: 82, # 'ĺ' + 230: 243, # 'ć' + 231: 73, # 'ç' + 232: 244, # 'č' + 233: 15, # 'é' + 234: 85, # 'ę' + 235: 79, # 'ë' + 236: 86, # 'ě' + 237: 30, # 'í' + 238: 77, # 'î' + 239: 87, # 'ď' + 240: 245, # 'đ' + 241: 246, # 'ń' + 242: 247, # 'ň' + 243: 25, # 'ó' + 244: 74, # 'ô' + 245: 42, # 'ő' + 246: 24, # 'ö' + 247: 248, # '÷' + 248: 249, # 'ř' + 249: 250, # 'ů' + 250: 31, # 'ú' + 251: 56, # 'ű' + 252: 29, # 'ü' + 253: 251, # 'ý' + 254: 252, # 'ţ' + 255: 253, # '˙' } -WINDOWS_1250_HUNGARIAN_MODEL = SingleByteCharSetModel(charset_name='windows-1250', - language='Hungarian', - char_to_order_map=WINDOWS_1250_HUNGARIAN_CHAR_TO_ORDER, - language_model=HUNGARIAN_LANG_MODEL, - typical_positive_ratio=0.947368, - keep_ascii_letters=True, - alphabet='ABCDEFGHIJKLMNOPRSTUVZabcdefghijklmnoprstuvzÁÉÍÓÖÚÜáéíóöúüŐőŰű') +WINDOWS_1250_HUNGARIAN_MODEL = SingleByteCharSetModel( + charset_name="windows-1250", + language="Hungarian", + char_to_order_map=WINDOWS_1250_HUNGARIAN_CHAR_TO_ORDER, + language_model=HUNGARIAN_LANG_MODEL, + typical_positive_ratio=0.947368, + keep_ascii_letters=True, + alphabet="ABCDEFGHIJKLMNOPRSTUVZabcdefghijklmnoprstuvzÁÉÍÓÖÚÜáéíóöúüŐőŰű", +) ISO_8859_2_HUNGARIAN_CHAR_TO_ORDER = { - 0: 255, # '\x00' - 1: 255, # '\x01' - 2: 255, # '\x02' - 3: 255, # '\x03' - 4: 255, # '\x04' - 5: 255, # '\x05' - 6: 255, # '\x06' - 7: 255, # '\x07' - 8: 255, # '\x08' - 9: 255, # '\t' - 10: 254, # '\n' - 11: 255, # '\x0b' - 12: 255, # '\x0c' - 13: 254, # '\r' - 14: 255, # '\x0e' - 15: 255, # '\x0f' - 16: 255, # '\x10' - 17: 255, # '\x11' - 18: 255, # '\x12' - 19: 255, # '\x13' - 20: 255, # '\x14' - 21: 255, # '\x15' - 22: 255, # '\x16' - 23: 255, # '\x17' - 24: 255, # '\x18' - 25: 255, # '\x19' - 26: 255, # '\x1a' - 27: 255, # '\x1b' - 28: 255, # '\x1c' - 29: 255, # '\x1d' - 30: 255, # '\x1e' - 31: 255, # '\x1f' - 32: 253, # ' ' - 33: 253, # '!' - 34: 253, # '"' - 35: 253, # '#' - 36: 253, # '$' - 37: 253, # '%' - 38: 253, # '&' - 39: 253, # "'" - 40: 253, # '(' - 41: 253, # ')' - 42: 253, # '*' - 43: 253, # '+' - 44: 253, # ',' - 45: 253, # '-' - 46: 253, # '.' - 47: 253, # '/' - 48: 252, # '0' - 49: 252, # '1' - 50: 252, # '2' - 51: 252, # '3' - 52: 252, # '4' - 53: 252, # '5' - 54: 252, # '6' - 55: 252, # '7' - 56: 252, # '8' - 57: 252, # '9' - 58: 253, # ':' - 59: 253, # ';' - 60: 253, # '<' - 61: 253, # '=' - 62: 253, # '>' - 63: 253, # '?' 
- 64: 253, # '@' - 65: 28, # 'A' - 66: 40, # 'B' - 67: 54, # 'C' - 68: 45, # 'D' - 69: 32, # 'E' - 70: 50, # 'F' - 71: 49, # 'G' - 72: 38, # 'H' - 73: 39, # 'I' - 74: 53, # 'J' - 75: 36, # 'K' - 76: 41, # 'L' - 77: 34, # 'M' - 78: 35, # 'N' - 79: 47, # 'O' - 80: 46, # 'P' - 81: 71, # 'Q' - 82: 43, # 'R' - 83: 33, # 'S' - 84: 37, # 'T' - 85: 57, # 'U' - 86: 48, # 'V' - 87: 64, # 'W' - 88: 68, # 'X' - 89: 55, # 'Y' - 90: 52, # 'Z' - 91: 253, # '[' - 92: 253, # '\\' - 93: 253, # ']' - 94: 253, # '^' - 95: 253, # '_' - 96: 253, # '`' - 97: 2, # 'a' - 98: 18, # 'b' - 99: 26, # 'c' - 100: 17, # 'd' - 101: 1, # 'e' - 102: 27, # 'f' - 103: 12, # 'g' - 104: 20, # 'h' - 105: 9, # 'i' - 106: 22, # 'j' - 107: 7, # 'k' - 108: 6, # 'l' - 109: 13, # 'm' - 110: 4, # 'n' - 111: 8, # 'o' - 112: 23, # 'p' - 113: 67, # 'q' - 114: 10, # 'r' - 115: 5, # 's' - 116: 3, # 't' - 117: 21, # 'u' - 118: 19, # 'v' - 119: 65, # 'w' - 120: 62, # 'x' - 121: 16, # 'y' - 122: 11, # 'z' - 123: 253, # '{' - 124: 253, # '|' - 125: 253, # '}' - 126: 253, # '~' - 127: 253, # '\x7f' - 128: 159, # '\x80' - 129: 160, # '\x81' - 130: 161, # '\x82' - 131: 162, # '\x83' - 132: 163, # '\x84' - 133: 164, # '\x85' - 134: 165, # '\x86' - 135: 166, # '\x87' - 136: 167, # '\x88' - 137: 168, # '\x89' - 138: 169, # '\x8a' - 139: 170, # '\x8b' - 140: 171, # '\x8c' - 141: 172, # '\x8d' - 142: 173, # '\x8e' - 143: 174, # '\x8f' - 144: 175, # '\x90' - 145: 176, # '\x91' - 146: 177, # '\x92' - 147: 178, # '\x93' - 148: 179, # '\x94' - 149: 180, # '\x95' - 150: 181, # '\x96' - 151: 182, # '\x97' - 152: 183, # '\x98' - 153: 184, # '\x99' - 154: 185, # '\x9a' - 155: 186, # '\x9b' - 156: 187, # '\x9c' - 157: 188, # '\x9d' - 158: 189, # '\x9e' - 159: 190, # '\x9f' - 160: 191, # '\xa0' - 161: 192, # 'Ą' - 162: 193, # '˘' - 163: 194, # 'Ł' - 164: 195, # '¤' - 165: 196, # 'Ľ' - 166: 197, # 'Ś' - 167: 75, # '§' - 168: 198, # '¨' - 169: 199, # 'Š' - 170: 200, # 'Ş' - 171: 201, # 'Ť' - 172: 202, # 'Ź' - 173: 203, # '\xad' - 174: 204, # 'Ž' - 175: 205, # 'Ż' - 176: 79, # '°' - 177: 206, # 'ą' - 178: 207, # '˛' - 179: 208, # 'ł' - 180: 209, # '´' - 181: 210, # 'ľ' - 182: 211, # 'ś' - 183: 212, # 'ˇ' - 184: 213, # '¸' - 185: 214, # 'š' - 186: 215, # 'ş' - 187: 216, # 'ť' - 188: 217, # 'ź' - 189: 218, # '˝' - 190: 219, # 'ž' - 191: 220, # 'ż' - 192: 221, # 'Ŕ' - 193: 51, # 'Á' - 194: 81, # 'Â' - 195: 222, # 'Ă' - 196: 78, # 'Ä' - 197: 223, # 'Ĺ' - 198: 224, # 'Ć' - 199: 225, # 'Ç' - 200: 226, # 'Č' - 201: 44, # 'É' - 202: 227, # 'Ę' - 203: 228, # 'Ë' - 204: 229, # 'Ě' - 205: 61, # 'Í' - 206: 230, # 'Î' - 207: 231, # 'Ď' - 208: 232, # 'Đ' - 209: 233, # 'Ń' - 210: 234, # 'Ň' - 211: 58, # 'Ó' - 212: 235, # 'Ô' - 213: 66, # 'Ő' - 214: 59, # 'Ö' - 215: 236, # '×' - 216: 237, # 'Ř' - 217: 238, # 'Ů' - 218: 60, # 'Ú' - 219: 69, # 'Ű' - 220: 63, # 'Ü' - 221: 239, # 'Ý' - 222: 240, # 'Ţ' - 223: 241, # 'ß' - 224: 82, # 'ŕ' - 225: 14, # 'á' - 226: 74, # 'â' - 227: 242, # 'ă' - 228: 70, # 'ä' - 229: 80, # 'ĺ' - 230: 243, # 'ć' - 231: 72, # 'ç' - 232: 244, # 'č' - 233: 15, # 'é' - 234: 83, # 'ę' - 235: 77, # 'ë' - 236: 84, # 'ě' - 237: 30, # 'í' - 238: 76, # 'î' - 239: 85, # 'ď' - 240: 245, # 'đ' - 241: 246, # 'ń' - 242: 247, # 'ň' - 243: 25, # 'ó' - 244: 73, # 'ô' - 245: 42, # 'ő' - 246: 24, # 'ö' - 247: 248, # '÷' - 248: 249, # 'ř' - 249: 250, # 'ů' - 250: 31, # 'ú' - 251: 56, # 'ű' - 252: 29, # 'ü' - 253: 251, # 'ý' - 254: 252, # 'ţ' - 255: 253, # '˙' + 0: 255, # '\x00' + 1: 255, # '\x01' + 2: 255, # '\x02' + 3: 255, # '\x03' + 4: 255, # '\x04' + 5: 255, # '\x05' + 6: 
255, # '\x06' + 7: 255, # '\x07' + 8: 255, # '\x08' + 9: 255, # '\t' + 10: 254, # '\n' + 11: 255, # '\x0b' + 12: 255, # '\x0c' + 13: 254, # '\r' + 14: 255, # '\x0e' + 15: 255, # '\x0f' + 16: 255, # '\x10' + 17: 255, # '\x11' + 18: 255, # '\x12' + 19: 255, # '\x13' + 20: 255, # '\x14' + 21: 255, # '\x15' + 22: 255, # '\x16' + 23: 255, # '\x17' + 24: 255, # '\x18' + 25: 255, # '\x19' + 26: 255, # '\x1a' + 27: 255, # '\x1b' + 28: 255, # '\x1c' + 29: 255, # '\x1d' + 30: 255, # '\x1e' + 31: 255, # '\x1f' + 32: 253, # ' ' + 33: 253, # '!' + 34: 253, # '"' + 35: 253, # '#' + 36: 253, # '$' + 37: 253, # '%' + 38: 253, # '&' + 39: 253, # "'" + 40: 253, # '(' + 41: 253, # ')' + 42: 253, # '*' + 43: 253, # '+' + 44: 253, # ',' + 45: 253, # '-' + 46: 253, # '.' + 47: 253, # '/' + 48: 252, # '0' + 49: 252, # '1' + 50: 252, # '2' + 51: 252, # '3' + 52: 252, # '4' + 53: 252, # '5' + 54: 252, # '6' + 55: 252, # '7' + 56: 252, # '8' + 57: 252, # '9' + 58: 253, # ':' + 59: 253, # ';' + 60: 253, # '<' + 61: 253, # '=' + 62: 253, # '>' + 63: 253, # '?' + 64: 253, # '@' + 65: 28, # 'A' + 66: 40, # 'B' + 67: 54, # 'C' + 68: 45, # 'D' + 69: 32, # 'E' + 70: 50, # 'F' + 71: 49, # 'G' + 72: 38, # 'H' + 73: 39, # 'I' + 74: 53, # 'J' + 75: 36, # 'K' + 76: 41, # 'L' + 77: 34, # 'M' + 78: 35, # 'N' + 79: 47, # 'O' + 80: 46, # 'P' + 81: 71, # 'Q' + 82: 43, # 'R' + 83: 33, # 'S' + 84: 37, # 'T' + 85: 57, # 'U' + 86: 48, # 'V' + 87: 64, # 'W' + 88: 68, # 'X' + 89: 55, # 'Y' + 90: 52, # 'Z' + 91: 253, # '[' + 92: 253, # '\\' + 93: 253, # ']' + 94: 253, # '^' + 95: 253, # '_' + 96: 253, # '`' + 97: 2, # 'a' + 98: 18, # 'b' + 99: 26, # 'c' + 100: 17, # 'd' + 101: 1, # 'e' + 102: 27, # 'f' + 103: 12, # 'g' + 104: 20, # 'h' + 105: 9, # 'i' + 106: 22, # 'j' + 107: 7, # 'k' + 108: 6, # 'l' + 109: 13, # 'm' + 110: 4, # 'n' + 111: 8, # 'o' + 112: 23, # 'p' + 113: 67, # 'q' + 114: 10, # 'r' + 115: 5, # 's' + 116: 3, # 't' + 117: 21, # 'u' + 118: 19, # 'v' + 119: 65, # 'w' + 120: 62, # 'x' + 121: 16, # 'y' + 122: 11, # 'z' + 123: 253, # '{' + 124: 253, # '|' + 125: 253, # '}' + 126: 253, # '~' + 127: 253, # '\x7f' + 128: 159, # '\x80' + 129: 160, # '\x81' + 130: 161, # '\x82' + 131: 162, # '\x83' + 132: 163, # '\x84' + 133: 164, # '\x85' + 134: 165, # '\x86' + 135: 166, # '\x87' + 136: 167, # '\x88' + 137: 168, # '\x89' + 138: 169, # '\x8a' + 139: 170, # '\x8b' + 140: 171, # '\x8c' + 141: 172, # '\x8d' + 142: 173, # '\x8e' + 143: 174, # '\x8f' + 144: 175, # '\x90' + 145: 176, # '\x91' + 146: 177, # '\x92' + 147: 178, # '\x93' + 148: 179, # '\x94' + 149: 180, # '\x95' + 150: 181, # '\x96' + 151: 182, # '\x97' + 152: 183, # '\x98' + 153: 184, # '\x99' + 154: 185, # '\x9a' + 155: 186, # '\x9b' + 156: 187, # '\x9c' + 157: 188, # '\x9d' + 158: 189, # '\x9e' + 159: 190, # '\x9f' + 160: 191, # '\xa0' + 161: 192, # 'Ą' + 162: 193, # '˘' + 163: 194, # 'Ł' + 164: 195, # '¤' + 165: 196, # 'Ľ' + 166: 197, # 'Ś' + 167: 75, # '§' + 168: 198, # '¨' + 169: 199, # 'Š' + 170: 200, # 'Ş' + 171: 201, # 'Ť' + 172: 202, # 'Ź' + 173: 203, # '\xad' + 174: 204, # 'Ž' + 175: 205, # 'Ż' + 176: 79, # '°' + 177: 206, # 'ą' + 178: 207, # '˛' + 179: 208, # 'ł' + 180: 209, # '´' + 181: 210, # 'ľ' + 182: 211, # 'ś' + 183: 212, # 'ˇ' + 184: 213, # '¸' + 185: 214, # 'š' + 186: 215, # 'ş' + 187: 216, # 'ť' + 188: 217, # 'ź' + 189: 218, # '˝' + 190: 219, # 'ž' + 191: 220, # 'ż' + 192: 221, # 'Ŕ' + 193: 51, # 'Á' + 194: 81, # 'Â' + 195: 222, # 'Ă' + 196: 78, # 'Ä' + 197: 223, # 'Ĺ' + 198: 224, # 'Ć' + 199: 225, # 'Ç' + 200: 226, # 'Č' + 201: 44, # 'É' + 202: 227, # 'Ę' 
+ 203: 228, # 'Ë' + 204: 229, # 'Ě' + 205: 61, # 'Í' + 206: 230, # 'Î' + 207: 231, # 'Ď' + 208: 232, # 'Đ' + 209: 233, # 'Ń' + 210: 234, # 'Ň' + 211: 58, # 'Ó' + 212: 235, # 'Ô' + 213: 66, # 'Ő' + 214: 59, # 'Ö' + 215: 236, # '×' + 216: 237, # 'Ř' + 217: 238, # 'Ů' + 218: 60, # 'Ú' + 219: 69, # 'Ű' + 220: 63, # 'Ü' + 221: 239, # 'Ý' + 222: 240, # 'Ţ' + 223: 241, # 'ß' + 224: 82, # 'ŕ' + 225: 14, # 'á' + 226: 74, # 'â' + 227: 242, # 'ă' + 228: 70, # 'ä' + 229: 80, # 'ĺ' + 230: 243, # 'ć' + 231: 72, # 'ç' + 232: 244, # 'č' + 233: 15, # 'é' + 234: 83, # 'ę' + 235: 77, # 'ë' + 236: 84, # 'ě' + 237: 30, # 'í' + 238: 76, # 'î' + 239: 85, # 'ď' + 240: 245, # 'đ' + 241: 246, # 'ń' + 242: 247, # 'ň' + 243: 25, # 'ó' + 244: 73, # 'ô' + 245: 42, # 'ő' + 246: 24, # 'ö' + 247: 248, # '÷' + 248: 249, # 'ř' + 249: 250, # 'ů' + 250: 31, # 'ú' + 251: 56, # 'ű' + 252: 29, # 'ü' + 253: 251, # 'ý' + 254: 252, # 'ţ' + 255: 253, # '˙' } -ISO_8859_2_HUNGARIAN_MODEL = SingleByteCharSetModel(charset_name='ISO-8859-2', - language='Hungarian', - char_to_order_map=ISO_8859_2_HUNGARIAN_CHAR_TO_ORDER, - language_model=HUNGARIAN_LANG_MODEL, - typical_positive_ratio=0.947368, - keep_ascii_letters=True, - alphabet='ABCDEFGHIJKLMNOPRSTUVZabcdefghijklmnoprstuvzÁÉÍÓÖÚÜáéíóöúüŐőŰű') - +ISO_8859_2_HUNGARIAN_MODEL = SingleByteCharSetModel( + charset_name="ISO-8859-2", + language="Hungarian", + char_to_order_map=ISO_8859_2_HUNGARIAN_CHAR_TO_ORDER, + language_model=HUNGARIAN_LANG_MODEL, + typical_positive_ratio=0.947368, + keep_ascii_letters=True, + alphabet="ABCDEFGHIJKLMNOPRSTUVZabcdefghijklmnoprstuvzÁÉÍÓÖÚÜáéíóöúüŐőŰű", +) diff --git a/env/Lib/site-packages/pip/_vendor/chardet/langrussianmodel.py b/env/Lib/site-packages/pip/_vendor/chardet/langrussianmodel.py index 5594452b55bed63346b36a9647e663e874095412..39a5388948ef12b69b65fbfa89a84c6ef4a4bfd6 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/langrussianmodel.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/langrussianmodel.py @@ -1,9 +1,5 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - from pip._vendor.chardet.sbcharsetprober import SingleByteCharSetModel - # 3: Positive # 2: Likely # 1: Unlikely @@ -4115,1604 +4111,1615 @@ RUSSIAN_LANG_MODEL = { # Character Mapping Table(s): IBM866_RUSSIAN_CHAR_TO_ORDER = { - 0: 255, # '\x00' - 1: 255, # '\x01' - 2: 255, # '\x02' - 3: 255, # '\x03' - 4: 255, # '\x04' - 5: 255, # '\x05' - 6: 255, # '\x06' - 7: 255, # '\x07' - 8: 255, # '\x08' - 9: 255, # '\t' - 10: 254, # '\n' - 11: 255, # '\x0b' - 12: 255, # '\x0c' - 13: 254, # '\r' - 14: 255, # '\x0e' - 15: 255, # '\x0f' - 16: 255, # '\x10' - 17: 255, # '\x11' - 18: 255, # '\x12' - 19: 255, # '\x13' - 20: 255, # '\x14' - 21: 255, # '\x15' - 22: 255, # '\x16' - 23: 255, # '\x17' - 24: 255, # '\x18' - 25: 255, # '\x19' - 26: 255, # '\x1a' - 27: 255, # '\x1b' - 28: 255, # '\x1c' - 29: 255, # '\x1d' - 30: 255, # '\x1e' - 31: 255, # '\x1f' - 32: 253, # ' ' - 33: 253, # '!' - 34: 253, # '"' - 35: 253, # '#' - 36: 253, # '$' - 37: 253, # '%' - 38: 253, # '&' - 39: 253, # "'" - 40: 253, # '(' - 41: 253, # ')' - 42: 253, # '*' - 43: 253, # '+' - 44: 253, # ',' - 45: 253, # '-' - 46: 253, # '.' - 47: 253, # '/' - 48: 252, # '0' - 49: 252, # '1' - 50: 252, # '2' - 51: 252, # '3' - 52: 252, # '4' - 53: 252, # '5' - 54: 252, # '6' - 55: 252, # '7' - 56: 252, # '8' - 57: 252, # '9' - 58: 253, # ':' - 59: 253, # ';' - 60: 253, # '<' - 61: 253, # '=' - 62: 253, # '>' - 63: 253, # '?' 
diff --git a/env/Lib/site-packages/pip/_vendor/chardet/langrussianmodel.py b/env/Lib/site-packages/pip/_vendor/chardet/langrussianmodel.py
index 5594452b55bed63346b36a9647e663e874095412..39a5388948ef12b69b65fbfa89a84c6ef4a4bfd6 100644
--- a/env/Lib/site-packages/pip/_vendor/chardet/langrussianmodel.py
+++ b/env/Lib/site-packages/pip/_vendor/chardet/langrussianmodel.py
@@ -1,9 +1,5 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
 from pip._vendor.chardet.sbcharsetprober import SingleByteCharSetModel
-
 # 3: Positive
 # 2: Likely
 # 1: Unlikely
@@ -4115,1604 +4111,1615 @@ RUSSIAN_LANG_MODEL = {
 # Character Mapping Table(s):
 IBM866_RUSSIAN_CHAR_TO_ORDER = {
-    [… 256 entries (0x00–0xFF): ASCII sentinels as above; Cyrillic А–п at 0x80–0xAF and р–я at 0xE0–0xEF; box-drawing bytes get rare ranks …]
+    [… same 256 entries re-added with four-space indent; values and comments unchanged …]
 }
-IBM866_RUSSIAN_MODEL = SingleByteCharSetModel(charset_name='IBM866',
-                                              language='Russian',
-                                              char_to_order_map=IBM866_RUSSIAN_CHAR_TO_ORDER,
-                                              language_model=RUSSIAN_LANG_MODEL,
-                                              typical_positive_ratio=0.976601,
-                                              keep_ascii_letters=False,
-                                              alphabet='ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё')
+IBM866_RUSSIAN_MODEL = SingleByteCharSetModel(
+    charset_name="IBM866",
+    language="Russian",
+    char_to_order_map=IBM866_RUSSIAN_CHAR_TO_ORDER,
+    language_model=RUSSIAN_LANG_MODEL,
+    typical_positive_ratio=0.976601,
+    keep_ascii_letters=False,
+    alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё",
+)

 WINDOWS_1251_RUSSIAN_CHAR_TO_ORDER = {
-    [… 256 entries (0x00–0xFF): ASCII sentinels as above; windows-1251 punctuation/€/Serbian letters at 0x80–0xBF as rare ranks; Cyrillic А–я at 0xC0–0xFF …]
+    [… same 256 entries re-added with four-space indent; values and comments unchanged …]
 }
-WINDOWS_1251_RUSSIAN_MODEL = SingleByteCharSetModel(charset_name='windows-1251',
-                                                    language='Russian',
-                                                    char_to_order_map=WINDOWS_1251_RUSSIAN_CHAR_TO_ORDER,
-                                                    language_model=RUSSIAN_LANG_MODEL,
-                                                    typical_positive_ratio=0.976601,
-                                                    keep_ascii_letters=False,
-                                                    alphabet='ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё')
+WINDOWS_1251_RUSSIAN_MODEL = SingleByteCharSetModel(
+    charset_name="windows-1251",
+    language="Russian",
+    char_to_order_map=WINDOWS_1251_RUSSIAN_CHAR_TO_ORDER,
+    language_model=RUSSIAN_LANG_MODEL,
+    typical_positive_ratio=0.976601,
+    keep_ascii_letters=False,
+    alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё",
+)
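All of the Russian models in this hunk share the same RUSSIAN_LANG_MODEL, alphabet, and typical_positive_ratio (0.976601); only the byte layout of char_to_order_map differs, and keep_ascii_letters is False because ASCII letters carry no signal for Cyrillic text. The shared model is what lets the detector tell IBM866 from windows-1251 purely by where the Cyrillic letters sit. A hedged usage sketch — the vendored copy is importable under pip._vendor, and the standalone chardet package exposes the same entry point:

    from pip._vendor import chardet  # or simply: import chardet
    sample = "Съешь же ещё этих мягких французских булок".encode("koi8-r")
    print(chardet.detect(sample))
    # Expected shape (confidence varies, especially on short samples):
    # {'encoding': 'KOI8-R', 'confidence': 0.9…, 'language': 'Russian'}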
 IBM855_RUSSIAN_CHAR_TO_ORDER = {
-    [… 256 entries (0x00–0xFF): ASCII sentinels as above; IBM855 interleaves lower/upper Cyrillic pairs (ђЂ, ѓЃ, ёЁ, …) from 0x80 and mixes letters with box-drawing bytes through 0xEF …]
+    [… same 256 entries re-added with four-space indent; values and comments unchanged …]
 }
-IBM855_RUSSIAN_MODEL = SingleByteCharSetModel(charset_name='IBM855',
-                                              language='Russian',
-                                              char_to_order_map=IBM855_RUSSIAN_CHAR_TO_ORDER,
-                                              language_model=RUSSIAN_LANG_MODEL,
-                                              typical_positive_ratio=0.976601,
-                                              keep_ascii_letters=False,
-                                              alphabet='ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё')
+IBM855_RUSSIAN_MODEL = SingleByteCharSetModel(
+    charset_name="IBM855",
+    language="Russian",
+    char_to_order_map=IBM855_RUSSIAN_CHAR_TO_ORDER,
+    language_model=RUSSIAN_LANG_MODEL,
+    typical_positive_ratio=0.976601,
+    keep_ascii_letters=False,
+    alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё",
+)

 KOI8_R_RUSSIAN_CHAR_TO_ORDER = {
-    [… 256 entries (0x00–0xFF): ASCII sentinels as above; box-drawing/math bytes at 0x80–0xBF as rare ranks; lowercase Cyrillic at 0xC0–0xDF, uppercase at 0xE0–0xFF …]
+    [… same 256 entries re-added with four-space indent; values and comments unchanged …]
 }
-KOI8_R_RUSSIAN_MODEL = SingleByteCharSetModel(charset_name='KOI8-R',
-                                              language='Russian',
-                                              char_to_order_map=KOI8_R_RUSSIAN_CHAR_TO_ORDER,
-                                              language_model=RUSSIAN_LANG_MODEL,
-                                              typical_positive_ratio=0.976601,
-                                              keep_ascii_letters=False,
-                                              alphabet='ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё')
+KOI8_R_RUSSIAN_MODEL = SingleByteCharSetModel(
+    charset_name="KOI8-R",
+    language="Russian",
+    char_to_order_map=KOI8_R_RUSSIAN_CHAR_TO_ORDER,
+    language_model=RUSSIAN_LANG_MODEL,
+    typical_positive_ratio=0.976601,
+    keep_ascii_letters=False,
+    alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё",
+)
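The legend at the top of this hunk (3: positive, 2: likely, 1: unlikely) describes RUSSIAN_LANG_MODEL: a nested dict rating how plausible each ordered pair of frequency ranks is in Russian. The single-byte prober folds bytes to ranks through char_to_order_map and compares the share of "positive" pairs against typical_positive_ratio. A deliberately simplified sketch of that idea — not chardet's exact algorithm, which tracks more state:

    def crude_confidence(data: bytes, model) -> float:
        """Rough single-byte-charset score in [0, 1] (simplified sketch)."""
        prev = None
        pairs = positives = 0
        for byte in data:
            order = model.char_to_order_map[byte]
            if order >= 64:   # outside the frequent-letter range: reset the bigram
                prev = None
                continue
            if prev is not None:
                pairs += 1
                if model.language_model.get(prev, {}).get(order, 0) == 3:
                    positives += 1
            prev = order
        if pairs == 0:
            return 0.0
        # Normalize against the ratio typical for the language (~0.9766 here).
        return min(1.0, (positives / pairs) / model.typical_positive_ratio)

    crude_confidence("привет, мир".encode("koi8-r"), KOI8_R_RUSSIAN_MODEL)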
 MACCYRILLIC_RUSSIAN_CHAR_TO_ORDER = {
-    [… 256 entries (0x00–0xFF): ASCII sentinels as above; uppercase Cyrillic А–Я at 0x80–0x9F, symbols/Ukrainian letters at 0xA0–0xDF as rare ranks, lowercase а–ю at 0xE0–0xFE …]
+    [… same 256 entries re-added with four-space indent; values and comments unchanged …]
 }
-MACCYRILLIC_RUSSIAN_MODEL = SingleByteCharSetModel(charset_name='MacCyrillic',
-                                                   language='Russian',
-                                                   char_to_order_map=MACCYRILLIC_RUSSIAN_CHAR_TO_ORDER,
-                                                   language_model=RUSSIAN_LANG_MODEL,
-                                                   typical_positive_ratio=0.976601,
-                                                   keep_ascii_letters=False,
-                                                   alphabet='ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё')
+MACCYRILLIC_RUSSIAN_MODEL = SingleByteCharSetModel(
+    charset_name="MacCyrillic",
+    language="Russian",
+    char_to_order_map=MACCYRILLIC_RUSSIAN_CHAR_TO_ORDER,
+    language_model=RUSSIAN_LANG_MODEL,
+    typical_positive_ratio=0.976601,
+    keep_ascii_letters=False,
+    alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё",
+)

 ISO_8859_5_RUSSIAN_CHAR_TO_ORDER = {
-    [… 256 entries (0x00–0xFF): ASCII sentinels as above; 0x80–0x9F unmapped; Ukrainian/Serbian letters at 0xA1–0xAF as rare ranks; Cyrillic А–я at 0xB0–0xEF …]
+    [… same 256 entries re-added with four-space indent; values and comments unchanged …]
 }
-ISO_8859_5_RUSSIAN_MODEL = SingleByteCharSetModel(charset_name='ISO-8859-5',
-                                                  language='Russian',
-                                                  char_to_order_map=ISO_8859_5_RUSSIAN_CHAR_TO_ORDER,
-                                                  language_model=RUSSIAN_LANG_MODEL,
-                                                  typical_positive_ratio=0.976601,
-                                                  keep_ascii_letters=False,
-                                                  alphabet='ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё')
-
+ISO_8859_5_RUSSIAN_MODEL = SingleByteCharSetModel(
+    charset_name="ISO-8859-5",
+    language="Russian",
+    char_to_order_map=ISO_8859_5_RUSSIAN_CHAR_TO_ORDER,
+    language_model=RUSSIAN_LANG_MODEL,
+    typical_positive_ratio=0.976601,
+    keep_ascii_letters=False,
+    alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё",
+)
diff --git a/env/Lib/site-packages/pip/_vendor/chardet/langthaimodel.py b/env/Lib/site-packages/pip/_vendor/chardet/langthaimodel.py
index 9a37db573881e426acc756db236be0eb052ef0d9..489cad930e0029fc2f8e5111df1bad38151a07a9 100644
--- a/env/Lib/site-packages/pip/_vendor/chardet/langthaimodel.py
+++ b/env/Lib/site-packages/pip/_vendor/chardet/langthaimodel.py
@@ -1,9 +1,5 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
 from pip._vendor.chardet.sbcharsetprober import SingleByteCharSetModel
-
 # 3: Positive
 # 2: Likely
 # 1: Unlikely
@@ -4115,269 +4111,270 @@ THAI_LANG_MODEL = {
 # Character Mapping Table(s):
 TIS_620_THAI_CHAR_TO_ORDER = {
-    [… 256 entries (0x00–0xFF): ASCII letters keep letter ranks here; Thai characters at 0xA1–0xF9 carry the low ranks (า=1, ร=2, น=3, …) …]
+    0: 255,  # '\x00'
+    1: 255,  # '\x01'
+    2: 255,  # '\x02'
+    3: 255,  # '\x03'
+    4: 255,  # '\x04'
+    5: 255,  # '\x05'
+    6: 255,  # '\x06'
+ 7: 255, # '\x07' + 8: 255, # '\x08' + 9: 255, # '\t' + 10: 254, # '\n' + 11: 255, # '\x0b' + 12: 255, # '\x0c' + 13: 254, # '\r' + 14: 255, # '\x0e' + 15: 255, # '\x0f' + 16: 255, # '\x10' + 17: 255, # '\x11' + 18: 255, # '\x12' + 19: 255, # '\x13' + 20: 255, # '\x14' + 21: 255, # '\x15' + 22: 255, # '\x16' + 23: 255, # '\x17' + 24: 255, # '\x18' + 25: 255, # '\x19' + 26: 255, # '\x1a' + 27: 255, # '\x1b' + 28: 255, # '\x1c' + 29: 255, # '\x1d' + 30: 255, # '\x1e' + 31: 255, # '\x1f' + 32: 253, # ' ' + 33: 253, # '!' + 34: 253, # '"' + 35: 253, # '#' + 36: 253, # '$' + 37: 253, # '%' + 38: 253, # '&' + 39: 253, # "'" + 40: 253, # '(' + 41: 253, # ')' + 42: 253, # '*' + 43: 253, # '+' + 44: 253, # ',' + 45: 253, # '-' + 46: 253, # '.' + 47: 253, # '/' + 48: 252, # '0' + 49: 252, # '1' + 50: 252, # '2' + 51: 252, # '3' + 52: 252, # '4' + 53: 252, # '5' + 54: 252, # '6' + 55: 252, # '7' + 56: 252, # '8' + 57: 252, # '9' + 58: 253, # ':' + 59: 253, # ';' + 60: 253, # '<' + 61: 253, # '=' + 62: 253, # '>' + 63: 253, # '?' + 64: 253, # '@' + 65: 182, # 'A' + 66: 106, # 'B' + 67: 107, # 'C' + 68: 100, # 'D' + 69: 183, # 'E' + 70: 184, # 'F' + 71: 185, # 'G' + 72: 101, # 'H' + 73: 94, # 'I' + 74: 186, # 'J' + 75: 187, # 'K' + 76: 108, # 'L' + 77: 109, # 'M' + 78: 110, # 'N' + 79: 111, # 'O' + 80: 188, # 'P' + 81: 189, # 'Q' + 82: 190, # 'R' + 83: 89, # 'S' + 84: 95, # 'T' + 85: 112, # 'U' + 86: 113, # 'V' + 87: 191, # 'W' + 88: 192, # 'X' + 89: 193, # 'Y' + 90: 194, # 'Z' + 91: 253, # '[' + 92: 253, # '\\' + 93: 253, # ']' + 94: 253, # '^' + 95: 253, # '_' + 96: 253, # '`' + 97: 64, # 'a' + 98: 72, # 'b' + 99: 73, # 'c' + 100: 114, # 'd' + 101: 74, # 'e' + 102: 115, # 'f' + 103: 116, # 'g' + 104: 102, # 'h' + 105: 81, # 'i' + 106: 201, # 'j' + 107: 117, # 'k' + 108: 90, # 'l' + 109: 103, # 'm' + 110: 78, # 'n' + 111: 82, # 'o' + 112: 96, # 'p' + 113: 202, # 'q' + 114: 91, # 'r' + 115: 79, # 's' + 116: 84, # 't' + 117: 104, # 'u' + 118: 105, # 'v' + 119: 97, # 'w' + 120: 98, # 'x' + 121: 92, # 'y' + 122: 203, # 'z' + 123: 253, # '{' + 124: 253, # '|' + 125: 253, # '}' + 126: 253, # '~' + 127: 253, # '\x7f' + 128: 209, # '\x80' + 129: 210, # '\x81' + 130: 211, # '\x82' + 131: 212, # '\x83' + 132: 213, # '\x84' + 133: 88, # '\x85' + 134: 214, # '\x86' + 135: 215, # '\x87' + 136: 216, # '\x88' + 137: 217, # '\x89' + 138: 218, # '\x8a' + 139: 219, # '\x8b' + 140: 220, # '\x8c' + 141: 118, # '\x8d' + 142: 221, # '\x8e' + 143: 222, # '\x8f' + 144: 223, # '\x90' + 145: 224, # '\x91' + 146: 99, # '\x92' + 147: 85, # '\x93' + 148: 83, # '\x94' + 149: 225, # '\x95' + 150: 226, # '\x96' + 151: 227, # '\x97' + 152: 228, # '\x98' + 153: 229, # '\x99' + 154: 230, # '\x9a' + 155: 231, # '\x9b' + 156: 232, # '\x9c' + 157: 233, # '\x9d' + 158: 234, # '\x9e' + 159: 235, # '\x9f' + 160: 236, # None + 161: 5, # 'ก' + 162: 30, # 'ข' + 163: 237, # 'ฃ' + 164: 24, # 'ค' + 165: 238, # 'ฅ' + 166: 75, # 'ฆ' + 167: 8, # 'ง' + 168: 26, # 'จ' + 169: 52, # 'ฉ' + 170: 34, # 'ช' + 171: 51, # 'ซ' + 172: 119, # 'ฌ' + 173: 47, # 'ญ' + 174: 58, # 'ฎ' + 175: 57, # 'ฏ' + 176: 49, # 'ฐ' + 177: 53, # 'ฑ' + 178: 55, # 'ฒ' + 179: 43, # 'ณ' + 180: 20, # 'ด' + 181: 19, # 'ต' + 182: 44, # 'ถ' + 183: 14, # 'ท' + 184: 48, # 'ธ' + 185: 3, # 'น' + 186: 17, # 'บ' + 187: 25, # 'ป' + 188: 39, # 'ผ' + 189: 62, # 'ฝ' + 190: 31, # 'พ' + 191: 54, # 'ฟ' + 192: 45, # 'ภ' + 193: 9, # 'ม' + 194: 16, # 'ย' + 195: 2, # 'ร' + 196: 61, # 'ฤ' + 197: 15, # 'ล' + 198: 239, # 'ฦ' + 199: 12, # 'ว' + 200: 42, # 'ศ' + 201: 46, # 'ษ' + 202: 18, # 'ส' + 203: 21, # 'ห' 
+ 204: 76, # 'ฬ' + 205: 4, # 'อ' + 206: 66, # 'ฮ' + 207: 63, # 'ฯ' + 208: 22, # 'ะ' + 209: 10, # 'ั' + 210: 1, # 'า' + 211: 36, # 'ำ' + 212: 23, # 'ิ' + 213: 13, # 'ี' + 214: 40, # 'ึ' + 215: 27, # 'ื' + 216: 32, # 'ุ' + 217: 35, # 'ู' + 218: 86, # 'ฺ' + 219: 240, # None + 220: 241, # None + 221: 242, # None + 222: 243, # None + 223: 244, # '฿' + 224: 11, # 'เ' + 225: 28, # 'แ' + 226: 41, # 'โ' + 227: 29, # 'ใ' + 228: 33, # 'ไ' + 229: 245, # 'ๅ' + 230: 50, # 'ๆ' + 231: 37, # '็' + 232: 6, # '่' + 233: 7, # '้' + 234: 67, # '๊' + 235: 77, # '๋' + 236: 38, # '์' + 237: 93, # 'ํ' + 238: 246, # '๎' + 239: 247, # '๏' + 240: 68, # '๐' + 241: 56, # '๑' + 242: 59, # '๒' + 243: 65, # '๓' + 244: 69, # '๔' + 245: 60, # '๕' + 246: 70, # '๖' + 247: 80, # '๗' + 248: 71, # '๘' + 249: 87, # '๙' + 250: 248, # '๚' + 251: 249, # '๛' + 252: 250, # None + 253: 251, # None + 254: 252, # None + 255: 253, # None } -TIS_620_THAI_MODEL = SingleByteCharSetModel(charset_name='TIS-620', - language='Thai', - char_to_order_map=TIS_620_THAI_CHAR_TO_ORDER, - language_model=THAI_LANG_MODEL, - typical_positive_ratio=0.926386, - keep_ascii_letters=False, - alphabet='กขฃคฅฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลฦวศษสหฬอฮฯะัาำิีึืฺุู฿เแโใไๅๆ็่้๊๋์ํ๎๏๐๑๒๓๔๕๖๗๘๙๚๛') - +TIS_620_THAI_MODEL = SingleByteCharSetModel( + charset_name="TIS-620", + language="Thai", + char_to_order_map=TIS_620_THAI_CHAR_TO_ORDER, + language_model=THAI_LANG_MODEL, + typical_positive_ratio=0.926386, + keep_ascii_letters=False, + alphabet="กขฃคฅฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลฦวศษสหฬอฮฯะัาำิีึืฺุู฿เแโใไๅๆ็่้๊๋์ํ๎๏๐๑๒๓๔๕๖๗๘๙๚๛", +) diff --git a/env/Lib/site-packages/pip/_vendor/chardet/langturkishmodel.py b/env/Lib/site-packages/pip/_vendor/chardet/langturkishmodel.py index 43f4230aead2fd8c9f685bd0a726cf0723a9d98d..291857c25c83f91a151c1d7760e8e5e09c1ee238 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/langturkishmodel.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/langturkishmodel.py @@ -1,9 +1,5 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - from pip._vendor.chardet.sbcharsetprober import SingleByteCharSetModel - # 3: Positive # 2: Likely # 1: Unlikely @@ -4115,269 +4111,270 @@ TURKISH_LANG_MODEL = { # Character Mapping Table(s): ISO_8859_9_TURKISH_CHAR_TO_ORDER = { - 0: 255, # '\x00' - 1: 255, # '\x01' - 2: 255, # '\x02' - 3: 255, # '\x03' - 4: 255, # '\x04' - 5: 255, # '\x05' - 6: 255, # '\x06' - 7: 255, # '\x07' - 8: 255, # '\x08' - 9: 255, # '\t' - 10: 255, # '\n' - 11: 255, # '\x0b' - 12: 255, # '\x0c' - 13: 255, # '\r' - 14: 255, # '\x0e' - 15: 255, # '\x0f' - 16: 255, # '\x10' - 17: 255, # '\x11' - 18: 255, # '\x12' - 19: 255, # '\x13' - 20: 255, # '\x14' - 21: 255, # '\x15' - 22: 255, # '\x16' - 23: 255, # '\x17' - 24: 255, # '\x18' - 25: 255, # '\x19' - 26: 255, # '\x1a' - 27: 255, # '\x1b' - 28: 255, # '\x1c' - 29: 255, # '\x1d' - 30: 255, # '\x1e' - 31: 255, # '\x1f' - 32: 255, # ' ' - 33: 255, # '!' - 34: 255, # '"' - 35: 255, # '#' - 36: 255, # '$' - 37: 255, # '%' - 38: 255, # '&' - 39: 255, # "'" - 40: 255, # '(' - 41: 255, # ')' - 42: 255, # '*' - 43: 255, # '+' - 44: 255, # ',' - 45: 255, # '-' - 46: 255, # '.' - 47: 255, # '/' - 48: 255, # '0' - 49: 255, # '1' - 50: 255, # '2' - 51: 255, # '3' - 52: 255, # '4' - 53: 255, # '5' - 54: 255, # '6' - 55: 255, # '7' - 56: 255, # '8' - 57: 255, # '9' - 58: 255, # ':' - 59: 255, # ';' - 60: 255, # '<' - 61: 255, # '=' - 62: 255, # '>' - 63: 255, # '?' 
- 64: 255, # '@' - 65: 23, # 'A' - 66: 37, # 'B' - 67: 47, # 'C' - 68: 39, # 'D' - 69: 29, # 'E' - 70: 52, # 'F' - 71: 36, # 'G' - 72: 45, # 'H' - 73: 53, # 'I' - 74: 60, # 'J' - 75: 16, # 'K' - 76: 49, # 'L' - 77: 20, # 'M' - 78: 46, # 'N' - 79: 42, # 'O' - 80: 48, # 'P' - 81: 69, # 'Q' - 82: 44, # 'R' - 83: 35, # 'S' - 84: 31, # 'T' - 85: 51, # 'U' - 86: 38, # 'V' - 87: 62, # 'W' - 88: 65, # 'X' - 89: 43, # 'Y' - 90: 56, # 'Z' - 91: 255, # '[' - 92: 255, # '\\' - 93: 255, # ']' - 94: 255, # '^' - 95: 255, # '_' - 96: 255, # '`' - 97: 1, # 'a' - 98: 21, # 'b' - 99: 28, # 'c' - 100: 12, # 'd' - 101: 2, # 'e' - 102: 18, # 'f' - 103: 27, # 'g' - 104: 25, # 'h' - 105: 3, # 'i' - 106: 24, # 'j' - 107: 10, # 'k' - 108: 5, # 'l' - 109: 13, # 'm' - 110: 4, # 'n' - 111: 15, # 'o' - 112: 26, # 'p' - 113: 64, # 'q' - 114: 7, # 'r' - 115: 8, # 's' - 116: 9, # 't' - 117: 14, # 'u' - 118: 32, # 'v' - 119: 57, # 'w' - 120: 58, # 'x' - 121: 11, # 'y' - 122: 22, # 'z' - 123: 255, # '{' - 124: 255, # '|' - 125: 255, # '}' - 126: 255, # '~' - 127: 255, # '\x7f' - 128: 180, # '\x80' - 129: 179, # '\x81' - 130: 178, # '\x82' - 131: 177, # '\x83' - 132: 176, # '\x84' - 133: 175, # '\x85' - 134: 174, # '\x86' - 135: 173, # '\x87' - 136: 172, # '\x88' - 137: 171, # '\x89' - 138: 170, # '\x8a' - 139: 169, # '\x8b' - 140: 168, # '\x8c' - 141: 167, # '\x8d' - 142: 166, # '\x8e' - 143: 165, # '\x8f' - 144: 164, # '\x90' - 145: 163, # '\x91' - 146: 162, # '\x92' - 147: 161, # '\x93' - 148: 160, # '\x94' - 149: 159, # '\x95' - 150: 101, # '\x96' - 151: 158, # '\x97' - 152: 157, # '\x98' - 153: 156, # '\x99' - 154: 155, # '\x9a' - 155: 154, # '\x9b' - 156: 153, # '\x9c' - 157: 152, # '\x9d' - 158: 151, # '\x9e' - 159: 106, # '\x9f' - 160: 150, # '\xa0' - 161: 149, # '¡' - 162: 148, # '¢' - 163: 147, # '£' - 164: 146, # '¤' - 165: 145, # '¥' - 166: 144, # '¦' - 167: 100, # '§' - 168: 143, # '¨' - 169: 142, # '©' - 170: 141, # 'ª' - 171: 140, # '«' - 172: 139, # '¬' - 173: 138, # '\xad' - 174: 137, # '®' - 175: 136, # '¯' - 176: 94, # '°' - 177: 80, # '±' - 178: 93, # '²' - 179: 135, # '³' - 180: 105, # '´' - 181: 134, # 'µ' - 182: 133, # '¶' - 183: 63, # '·' - 184: 132, # '¸' - 185: 131, # '¹' - 186: 130, # 'º' - 187: 129, # '»' - 188: 128, # '¼' - 189: 127, # '½' - 190: 126, # '¾' - 191: 125, # '¿' - 192: 124, # 'À' - 193: 104, # 'Á' - 194: 73, # 'Â' - 195: 99, # 'Ã' - 196: 79, # 'Ä' - 197: 85, # 'Å' - 198: 123, # 'Æ' - 199: 54, # 'Ç' - 200: 122, # 'È' - 201: 98, # 'É' - 202: 92, # 'Ê' - 203: 121, # 'Ë' - 204: 120, # 'Ì' - 205: 91, # 'Í' - 206: 103, # 'Î' - 207: 119, # 'Ï' - 208: 68, # 'Ğ' - 209: 118, # 'Ñ' - 210: 117, # 'Ò' - 211: 97, # 'Ó' - 212: 116, # 'Ô' - 213: 115, # 'Õ' - 214: 50, # 'Ö' - 215: 90, # '×' - 216: 114, # 'Ø' - 217: 113, # 'Ù' - 218: 112, # 'Ú' - 219: 111, # 'Û' - 220: 55, # 'Ü' - 221: 41, # 'İ' - 222: 40, # 'Ş' - 223: 86, # 'ß' - 224: 89, # 'à' - 225: 70, # 'á' - 226: 59, # 'â' - 227: 78, # 'ã' - 228: 71, # 'ä' - 229: 82, # 'å' - 230: 88, # 'æ' - 231: 33, # 'ç' - 232: 77, # 'è' - 233: 66, # 'é' - 234: 84, # 'ê' - 235: 83, # 'ë' - 236: 110, # 'ì' - 237: 75, # 'í' - 238: 61, # 'î' - 239: 96, # 'ï' - 240: 30, # 'ğ' - 241: 67, # 'ñ' - 242: 109, # 'ò' - 243: 74, # 'ó' - 244: 87, # 'ô' - 245: 102, # 'õ' - 246: 34, # 'ö' - 247: 95, # '÷' - 248: 81, # 'ø' - 249: 108, # 'ù' - 250: 76, # 'ú' - 251: 72, # 'û' - 252: 17, # 'ü' - 253: 6, # 'ı' - 254: 19, # 'ş' - 255: 107, # 'ÿ' + 0: 255, # '\x00' + 1: 255, # '\x01' + 2: 255, # '\x02' + 3: 255, # '\x03' + 4: 255, # '\x04' + 5: 255, # '\x05' + 6: 255, # '\x06' + 
7: 255, # '\x07' + 8: 255, # '\x08' + 9: 255, # '\t' + 10: 255, # '\n' + 11: 255, # '\x0b' + 12: 255, # '\x0c' + 13: 255, # '\r' + 14: 255, # '\x0e' + 15: 255, # '\x0f' + 16: 255, # '\x10' + 17: 255, # '\x11' + 18: 255, # '\x12' + 19: 255, # '\x13' + 20: 255, # '\x14' + 21: 255, # '\x15' + 22: 255, # '\x16' + 23: 255, # '\x17' + 24: 255, # '\x18' + 25: 255, # '\x19' + 26: 255, # '\x1a' + 27: 255, # '\x1b' + 28: 255, # '\x1c' + 29: 255, # '\x1d' + 30: 255, # '\x1e' + 31: 255, # '\x1f' + 32: 255, # ' ' + 33: 255, # '!' + 34: 255, # '"' + 35: 255, # '#' + 36: 255, # '$' + 37: 255, # '%' + 38: 255, # '&' + 39: 255, # "'" + 40: 255, # '(' + 41: 255, # ')' + 42: 255, # '*' + 43: 255, # '+' + 44: 255, # ',' + 45: 255, # '-' + 46: 255, # '.' + 47: 255, # '/' + 48: 255, # '0' + 49: 255, # '1' + 50: 255, # '2' + 51: 255, # '3' + 52: 255, # '4' + 53: 255, # '5' + 54: 255, # '6' + 55: 255, # '7' + 56: 255, # '8' + 57: 255, # '9' + 58: 255, # ':' + 59: 255, # ';' + 60: 255, # '<' + 61: 255, # '=' + 62: 255, # '>' + 63: 255, # '?' + 64: 255, # '@' + 65: 23, # 'A' + 66: 37, # 'B' + 67: 47, # 'C' + 68: 39, # 'D' + 69: 29, # 'E' + 70: 52, # 'F' + 71: 36, # 'G' + 72: 45, # 'H' + 73: 53, # 'I' + 74: 60, # 'J' + 75: 16, # 'K' + 76: 49, # 'L' + 77: 20, # 'M' + 78: 46, # 'N' + 79: 42, # 'O' + 80: 48, # 'P' + 81: 69, # 'Q' + 82: 44, # 'R' + 83: 35, # 'S' + 84: 31, # 'T' + 85: 51, # 'U' + 86: 38, # 'V' + 87: 62, # 'W' + 88: 65, # 'X' + 89: 43, # 'Y' + 90: 56, # 'Z' + 91: 255, # '[' + 92: 255, # '\\' + 93: 255, # ']' + 94: 255, # '^' + 95: 255, # '_' + 96: 255, # '`' + 97: 1, # 'a' + 98: 21, # 'b' + 99: 28, # 'c' + 100: 12, # 'd' + 101: 2, # 'e' + 102: 18, # 'f' + 103: 27, # 'g' + 104: 25, # 'h' + 105: 3, # 'i' + 106: 24, # 'j' + 107: 10, # 'k' + 108: 5, # 'l' + 109: 13, # 'm' + 110: 4, # 'n' + 111: 15, # 'o' + 112: 26, # 'p' + 113: 64, # 'q' + 114: 7, # 'r' + 115: 8, # 's' + 116: 9, # 't' + 117: 14, # 'u' + 118: 32, # 'v' + 119: 57, # 'w' + 120: 58, # 'x' + 121: 11, # 'y' + 122: 22, # 'z' + 123: 255, # '{' + 124: 255, # '|' + 125: 255, # '}' + 126: 255, # '~' + 127: 255, # '\x7f' + 128: 180, # '\x80' + 129: 179, # '\x81' + 130: 178, # '\x82' + 131: 177, # '\x83' + 132: 176, # '\x84' + 133: 175, # '\x85' + 134: 174, # '\x86' + 135: 173, # '\x87' + 136: 172, # '\x88' + 137: 171, # '\x89' + 138: 170, # '\x8a' + 139: 169, # '\x8b' + 140: 168, # '\x8c' + 141: 167, # '\x8d' + 142: 166, # '\x8e' + 143: 165, # '\x8f' + 144: 164, # '\x90' + 145: 163, # '\x91' + 146: 162, # '\x92' + 147: 161, # '\x93' + 148: 160, # '\x94' + 149: 159, # '\x95' + 150: 101, # '\x96' + 151: 158, # '\x97' + 152: 157, # '\x98' + 153: 156, # '\x99' + 154: 155, # '\x9a' + 155: 154, # '\x9b' + 156: 153, # '\x9c' + 157: 152, # '\x9d' + 158: 151, # '\x9e' + 159: 106, # '\x9f' + 160: 150, # '\xa0' + 161: 149, # '¡' + 162: 148, # '¢' + 163: 147, # '£' + 164: 146, # '¤' + 165: 145, # '¥' + 166: 144, # '¦' + 167: 100, # '§' + 168: 143, # '¨' + 169: 142, # '©' + 170: 141, # 'ª' + 171: 140, # '«' + 172: 139, # '¬' + 173: 138, # '\xad' + 174: 137, # '®' + 175: 136, # '¯' + 176: 94, # '°' + 177: 80, # '±' + 178: 93, # '²' + 179: 135, # '³' + 180: 105, # '´' + 181: 134, # 'µ' + 182: 133, # '¶' + 183: 63, # '·' + 184: 132, # '¸' + 185: 131, # '¹' + 186: 130, # 'º' + 187: 129, # '»' + 188: 128, # '¼' + 189: 127, # '½' + 190: 126, # '¾' + 191: 125, # '¿' + 192: 124, # 'À' + 193: 104, # 'Á' + 194: 73, # 'Â' + 195: 99, # 'Ã' + 196: 79, # 'Ä' + 197: 85, # 'Å' + 198: 123, # 'Æ' + 199: 54, # 'Ç' + 200: 122, # 'È' + 201: 98, # 'É' + 202: 92, # 'Ê' + 203: 121, # 'Ë' + 
204: 120, # 'Ì' + 205: 91, # 'Í' + 206: 103, # 'Î' + 207: 119, # 'Ï' + 208: 68, # 'Ğ' + 209: 118, # 'Ñ' + 210: 117, # 'Ò' + 211: 97, # 'Ó' + 212: 116, # 'Ô' + 213: 115, # 'Õ' + 214: 50, # 'Ö' + 215: 90, # '×' + 216: 114, # 'Ø' + 217: 113, # 'Ù' + 218: 112, # 'Ú' + 219: 111, # 'Û' + 220: 55, # 'Ü' + 221: 41, # 'İ' + 222: 40, # 'Ş' + 223: 86, # 'ß' + 224: 89, # 'à' + 225: 70, # 'á' + 226: 59, # 'â' + 227: 78, # 'ã' + 228: 71, # 'ä' + 229: 82, # 'å' + 230: 88, # 'æ' + 231: 33, # 'ç' + 232: 77, # 'è' + 233: 66, # 'é' + 234: 84, # 'ê' + 235: 83, # 'ë' + 236: 110, # 'ì' + 237: 75, # 'í' + 238: 61, # 'î' + 239: 96, # 'ï' + 240: 30, # 'ğ' + 241: 67, # 'ñ' + 242: 109, # 'ò' + 243: 74, # 'ó' + 244: 87, # 'ô' + 245: 102, # 'õ' + 246: 34, # 'ö' + 247: 95, # '÷' + 248: 81, # 'ø' + 249: 108, # 'ù' + 250: 76, # 'ú' + 251: 72, # 'û' + 252: 17, # 'ü' + 253: 6, # 'ı' + 254: 19, # 'ş' + 255: 107, # 'ÿ' } -ISO_8859_9_TURKISH_MODEL = SingleByteCharSetModel(charset_name='ISO-8859-9', - language='Turkish', - char_to_order_map=ISO_8859_9_TURKISH_CHAR_TO_ORDER, - language_model=TURKISH_LANG_MODEL, - typical_positive_ratio=0.97029, - keep_ascii_letters=True, - alphabet='ABCDEFGHIJKLMNOPRSTUVYZabcdefghijklmnoprstuvyzÂÇÎÖÛÜâçîöûüĞğİıŞş') - +ISO_8859_9_TURKISH_MODEL = SingleByteCharSetModel( + charset_name="ISO-8859-9", + language="Turkish", + char_to_order_map=ISO_8859_9_TURKISH_CHAR_TO_ORDER, + language_model=TURKISH_LANG_MODEL, + typical_positive_ratio=0.97029, + keep_ascii_letters=True, + alphabet="ABCDEFGHIJKLMNOPRSTUVYZabcdefghijklmnoprstuvyzÂÇÎÖÛÜâçîöûüĞğİıŞş", +) diff --git a/env/Lib/site-packages/pip/_vendor/chardet/latin1prober.py b/env/Lib/site-packages/pip/_vendor/chardet/latin1prober.py index 7d1e8c20fb09ddaa0254ae74cbd4425ffdc5dcdc..59a01d91b87d4282bede38ade7cc78c0f7552d0e 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/latin1prober.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/latin1prober.py @@ -26,6 +26,8 @@ # 02110-1301 USA ######################### END LICENSE BLOCK ######################### +from typing import List, Union + from .charsetprober import CharSetProber from .enums import ProbingState @@ -41,6 +43,7 @@ ASV = 6 # accent small vowel ASO = 7 # accent small other CLASS_NUM = 8 # total classes +# fmt: off Latin1_CharToClass = ( OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 00 - 07 OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 08 - 0F @@ -91,34 +94,34 @@ Latin1ClassModel = ( 0, 3, 1, 3, 1, 1, 1, 3, # ASV 0, 3, 1, 3, 1, 1, 3, 3, # ASO ) +# fmt: on class Latin1Prober(CharSetProber): - def __init__(self): - super(Latin1Prober, self).__init__() - self._last_char_class = None - self._freq_counter = None + def __init__(self) -> None: + super().__init__() + self._last_char_class = OTH + self._freq_counter: List[int] = [] self.reset() - def reset(self): + def reset(self) -> None: self._last_char_class = OTH self._freq_counter = [0] * FREQ_CAT_NUM - CharSetProber.reset(self) + super().reset() @property - def charset_name(self): + def charset_name(self) -> str: return "ISO-8859-1" @property - def language(self): + def language(self) -> str: return "" - def feed(self, byte_str): - byte_str = self.filter_with_english_letters(byte_str) + def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState: + byte_str = self.remove_xml_tags(byte_str) for c in byte_str: char_class = Latin1_CharToClass[c] - freq = Latin1ClassModel[(self._last_char_class * CLASS_NUM) - + char_class] + freq = Latin1ClassModel[(self._last_char_class * CLASS_NUM) + char_class] if freq == 0: self._state = ProbingState.NOT_ME 
break @@ -127,19 +130,18 @@ class Latin1Prober(CharSetProber): return self.state - def get_confidence(self): + def get_confidence(self) -> float: if self.state == ProbingState.NOT_ME: return 0.01 total = sum(self._freq_counter) - if total < 0.01: - confidence = 0.0 - else: - confidence = ((self._freq_counter[3] - self._freq_counter[1] * 20.0) - / total) - if confidence < 0.0: - confidence = 0.0 + confidence = ( + 0.0 + if total < 0.01 + else (self._freq_counter[3] - self._freq_counter[1] * 20.0) / total + ) + confidence = max(confidence, 0.0) # lower the confidence of latin1 so that other more accurate # detectors can take priority. - confidence = confidence * 0.73 + confidence *= 0.73 return confidence diff --git a/env/Lib/site-packages/pip/_vendor/chardet/mbcharsetprober.py b/env/Lib/site-packages/pip/_vendor/chardet/mbcharsetprober.py index 6256ecfd1e2c9ac4cfa3fac359cd12dce85b759c..666307e8fe0608c69f2b6578a49794e1e20a139a 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/mbcharsetprober.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/mbcharsetprober.py @@ -27,8 +27,12 @@ # 02110-1301 USA ######################### END LICENSE BLOCK ######################### +from typing import Optional, Union + +from .chardistribution import CharDistributionAnalysis from .charsetprober import CharSetProber -from .enums import ProbingState, MachineState +from .codingstatemachine import CodingStateMachine +from .enums import LanguageFilter, MachineState, ProbingState class MultiByteCharSetProber(CharSetProber): @@ -36,56 +40,56 @@ class MultiByteCharSetProber(CharSetProber): MultiByteCharSetProber """ - def __init__(self, lang_filter=None): - super(MultiByteCharSetProber, self).__init__(lang_filter=lang_filter) - self.distribution_analyzer = None - self.coding_sm = None - self._last_char = [0, 0] + def __init__(self, lang_filter: LanguageFilter = LanguageFilter.NONE) -> None: + super().__init__(lang_filter=lang_filter) + self.distribution_analyzer: Optional[CharDistributionAnalysis] = None + self.coding_sm: Optional[CodingStateMachine] = None + self._last_char = bytearray(b"\0\0") - def reset(self): - super(MultiByteCharSetProber, self).reset() + def reset(self) -> None: + super().reset() if self.coding_sm: self.coding_sm.reset() if self.distribution_analyzer: self.distribution_analyzer.reset() - self._last_char = [0, 0] - - @property - def charset_name(self): - raise NotImplementedError + self._last_char = bytearray(b"\0\0") - @property - def language(self): - raise NotImplementedError + def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState: + assert self.coding_sm is not None + assert self.distribution_analyzer is not None - def feed(self, byte_str): - for i in range(len(byte_str)): - coding_state = self.coding_sm.next_state(byte_str[i]) + for i, byte in enumerate(byte_str): + coding_state = self.coding_sm.next_state(byte) if coding_state == MachineState.ERROR: - self.logger.debug('%s %s prober hit error at byte %s', - self.charset_name, self.language, i) + self.logger.debug( + "%s %s prober hit error at byte %s", + self.charset_name, + self.language, + i, + ) self._state = ProbingState.NOT_ME break - elif coding_state == MachineState.ITS_ME: + if coding_state == MachineState.ITS_ME: + self._state = ProbingState.FOUND_IT break - elif coding_state == MachineState.START: + if coding_state == MachineState.START: char_len = self.coding_sm.get_current_charlen() if i == 0: - self._last_char[1] = byte_str[0] + self._last_char[1] = byte self.distribution_analyzer.feed(self._last_char, char_len)
else: - self.distribution_analyzer.feed(byte_str[i - 1:i + 1], - char_len) + self.distribution_analyzer.feed(byte_str[i - 1 : i + 1], char_len) self._last_char[0] = byte_str[-1] if self.state == ProbingState.DETECTING: - if (self.distribution_analyzer.got_enough_data() and - (self.get_confidence() > self.SHORTCUT_THRESHOLD)): + if self.distribution_analyzer.got_enough_data() and ( + self.get_confidence() > self.SHORTCUT_THRESHOLD + ): self._state = ProbingState.FOUND_IT return self.state - def get_confidence(self): + def get_confidence(self) -> float: + assert self.distribution_analyzer is not None return self.distribution_analyzer.get_confidence() diff --git a/env/Lib/site-packages/pip/_vendor/chardet/mbcsgroupprober.py b/env/Lib/site-packages/pip/_vendor/chardet/mbcsgroupprober.py index 530abe75e0c00cbfcb2a310d872866f320977d0a..6cb9cc7b3bc751fbb5a54ba06eaaf953bf14ed8d 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/mbcsgroupprober.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/mbcsgroupprober.py @@ -27,20 +27,22 @@ # 02110-1301 USA ######################### END LICENSE BLOCK ######################### +from .big5prober import Big5Prober from .charsetgroupprober import CharSetGroupProber -from .utf8prober import UTF8Prober -from .sjisprober import SJISProber +from .cp949prober import CP949Prober +from .enums import LanguageFilter from .eucjpprober import EUCJPProber -from .gb2312prober import GB2312Prober from .euckrprober import EUCKRProber -from .cp949prober import CP949Prober -from .big5prober import Big5Prober from .euctwprober import EUCTWProber +from .gb2312prober import GB2312Prober +from .johabprober import JOHABProber +from .sjisprober import SJISProber +from .utf8prober import UTF8Prober class MBCSGroupProber(CharSetGroupProber): - def __init__(self, lang_filter=None): - super(MBCSGroupProber, self).__init__(lang_filter=lang_filter) + def __init__(self, lang_filter: LanguageFilter = LanguageFilter.NONE) -> None: + super().__init__(lang_filter=lang_filter) self.probers = [ UTF8Prober(), SJISProber(), @@ -49,6 +51,7 @@ class MBCSGroupProber(CharSetGroupProber): EUCKRProber(), CP949Prober(), Big5Prober(), - EUCTWProber() + EUCTWProber(), + JOHABProber(), ] self.reset() diff --git a/env/Lib/site-packages/pip/_vendor/chardet/mbcssm.py b/env/Lib/site-packages/pip/_vendor/chardet/mbcssm.py index 8360d0f284ef394f2980b5bb89548e234385cdf1..7bbe97e6665356327814e2b797ffcc5724974a46 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/mbcssm.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/mbcssm.py @@ -25,43 +25,45 @@ # 02110-1301 USA ######################### END LICENSE BLOCK ######################### +from .codingstatemachinedict import CodingStateMachineDict from .enums import MachineState # BIG5 +# fmt: off BIG5_CLS = ( - 1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as legal value - 1,1,1,1,1,1,0,0, # 08 - 0f - 1,1,1,1,1,1,1,1, # 10 - 17 - 1,1,1,0,1,1,1,1, # 18 - 1f - 1,1,1,1,1,1,1,1, # 20 - 27 - 1,1,1,1,1,1,1,1, # 28 - 2f - 1,1,1,1,1,1,1,1, # 30 - 37 - 1,1,1,1,1,1,1,1, # 38 - 3f - 2,2,2,2,2,2,2,2, # 40 - 47 - 2,2,2,2,2,2,2,2, # 48 - 4f - 2,2,2,2,2,2,2,2, # 50 - 57 - 2,2,2,2,2,2,2,2, # 58 - 5f - 2,2,2,2,2,2,2,2, # 60 - 67 - 2,2,2,2,2,2,2,2, # 68 - 6f - 2,2,2,2,2,2,2,2, # 70 - 77 - 2,2,2,2,2,2,2,1, # 78 - 7f - 4,4,4,4,4,4,4,4, # 80 - 87 - 4,4,4,4,4,4,4,4, # 88 - 8f - 4,4,4,4,4,4,4,4, # 90 - 97 - 4,4,4,4,4,4,4,4, # 98 - 9f - 4,3,3,3,3,3,3,3, # a0 - a7 - 3,3,3,3,3,3,3,3, # a8 - af - 3,3,3,3,3,3,3,3, # b0 - b7 - 3,3,3,3,3,3,3,3, # b8 - bf - 3,3,3,3,3,3,3,3, # c0 - c7 - 
3,3,3,3,3,3,3,3, # c8 - cf - 3,3,3,3,3,3,3,3, # d0 - d7 - 3,3,3,3,3,3,3,3, # d8 - df - 3,3,3,3,3,3,3,3, # e0 - e7 - 3,3,3,3,3,3,3,3, # e8 - ef - 3,3,3,3,3,3,3,3, # f0 - f7 - 3,3,3,3,3,3,3,0 # f8 - ff + 1, 1, 1, 1, 1, 1, 1, 1, # 00 - 07 #allow 0x00 as legal value + 1, 1, 1, 1, 1, 1, 0, 0, # 08 - 0f + 1, 1, 1, 1, 1, 1, 1, 1, # 10 - 17 + 1, 1, 1, 0, 1, 1, 1, 1, # 18 - 1f + 1, 1, 1, 1, 1, 1, 1, 1, # 20 - 27 + 1, 1, 1, 1, 1, 1, 1, 1, # 28 - 2f + 1, 1, 1, 1, 1, 1, 1, 1, # 30 - 37 + 1, 1, 1, 1, 1, 1, 1, 1, # 38 - 3f + 2, 2, 2, 2, 2, 2, 2, 2, # 40 - 47 + 2, 2, 2, 2, 2, 2, 2, 2, # 48 - 4f + 2, 2, 2, 2, 2, 2, 2, 2, # 50 - 57 + 2, 2, 2, 2, 2, 2, 2, 2, # 58 - 5f + 2, 2, 2, 2, 2, 2, 2, 2, # 60 - 67 + 2, 2, 2, 2, 2, 2, 2, 2, # 68 - 6f + 2, 2, 2, 2, 2, 2, 2, 2, # 70 - 77 + 2, 2, 2, 2, 2, 2, 2, 1, # 78 - 7f + 4, 4, 4, 4, 4, 4, 4, 4, # 80 - 87 + 4, 4, 4, 4, 4, 4, 4, 4, # 88 - 8f + 4, 4, 4, 4, 4, 4, 4, 4, # 90 - 97 + 4, 4, 4, 4, 4, 4, 4, 4, # 98 - 9f + 4, 3, 3, 3, 3, 3, 3, 3, # a0 - a7 + 3, 3, 3, 3, 3, 3, 3, 3, # a8 - af + 3, 3, 3, 3, 3, 3, 3, 3, # b0 - b7 + 3, 3, 3, 3, 3, 3, 3, 3, # b8 - bf + 3, 3, 3, 3, 3, 3, 3, 3, # c0 - c7 + 3, 3, 3, 3, 3, 3, 3, 3, # c8 - cf + 3, 3, 3, 3, 3, 3, 3, 3, # d0 - d7 + 3, 3, 3, 3, 3, 3, 3, 3, # d8 - df + 3, 3, 3, 3, 3, 3, 3, 3, # e0 - e7 + 3, 3, 3, 3, 3, 3, 3, 3, # e8 - ef + 3, 3, 3, 3, 3, 3, 3, 3, # f0 - f7 + 3, 3, 3, 3, 3, 3, 3, 0 # f8 - ff ) BIG5_ST = ( @@ -69,34 +71,37 @@ BIG5_ST = ( MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,#08-0f MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START#10-17 ) +# fmt: on BIG5_CHAR_LEN_TABLE = (0, 1, 1, 2, 0) -BIG5_SM_MODEL = {'class_table': BIG5_CLS, - 'class_factor': 5, - 'state_table': BIG5_ST, - 'char_len_table': BIG5_CHAR_LEN_TABLE, - 'name': 'Big5'} +BIG5_SM_MODEL: CodingStateMachineDict = { + "class_table": BIG5_CLS, + "class_factor": 5, + "state_table": BIG5_ST, + "char_len_table": BIG5_CHAR_LEN_TABLE, + "name": "Big5", +} # CP949 - +# fmt: off CP949_CLS = ( - 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,0,0, # 00 - 0f - 1,1,1,1,1,1,1,1, 1,1,1,0,1,1,1,1, # 10 - 1f - 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 20 - 2f - 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 30 - 3f - 1,4,4,4,4,4,4,4, 4,4,4,4,4,4,4,4, # 40 - 4f - 4,4,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 50 - 5f - 1,5,5,5,5,5,5,5, 5,5,5,5,5,5,5,5, # 60 - 6f - 5,5,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 70 - 7f - 0,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 80 - 8f - 6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 90 - 9f - 6,7,7,7,7,7,7,7, 7,7,7,7,7,8,8,8, # a0 - af - 7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7, # b0 - bf - 7,7,7,7,7,7,9,2, 2,3,2,2,2,2,2,2, # c0 - cf - 2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # d0 - df - 2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # e0 - ef - 2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,0, # f0 - ff + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, # 00 - 0f + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, # 10 - 1f + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, # 20 - 2f + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, # 30 - 3f + 1, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, # 40 - 4f + 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 1, 1, 1, 1, 1, # 50 - 5f + 1, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, # 60 - 6f + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 1, 1, 1, 1, 1, # 70 - 7f + 0, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, # 80 - 8f + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, # 90 - 9f + 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, # a0 - 
af + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, # b0 - bf + 7, 7, 7, 7, 7, 7, 9, 2, 2, 3, 2, 2, 2, 2, 2, 2, # c0 - cf + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, # d0 - df + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, # e0 - ef + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, # f0 - ff ) CP949_ST = ( @@ -109,50 +114,53 @@ CP949_ST = ( MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, # 5 MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START, # 6 ) +# fmt: on CP949_CHAR_LEN_TABLE = (0, 1, 2, 0, 1, 1, 2, 2, 0, 2) -CP949_SM_MODEL = {'class_table': CP949_CLS, - 'class_factor': 10, - 'state_table': CP949_ST, - 'char_len_table': CP949_CHAR_LEN_TABLE, - 'name': 'CP949'} +CP949_SM_MODEL: CodingStateMachineDict = { + "class_table": CP949_CLS, + "class_factor": 10, + "state_table": CP949_ST, + "char_len_table": CP949_CHAR_LEN_TABLE, + "name": "CP949", +} # EUC-JP - +# fmt: off EUCJP_CLS = ( - 4,4,4,4,4,4,4,4, # 00 - 07 - 4,4,4,4,4,4,5,5, # 08 - 0f - 4,4,4,4,4,4,4,4, # 10 - 17 - 4,4,4,5,4,4,4,4, # 18 - 1f - 4,4,4,4,4,4,4,4, # 20 - 27 - 4,4,4,4,4,4,4,4, # 28 - 2f - 4,4,4,4,4,4,4,4, # 30 - 37 - 4,4,4,4,4,4,4,4, # 38 - 3f - 4,4,4,4,4,4,4,4, # 40 - 47 - 4,4,4,4,4,4,4,4, # 48 - 4f - 4,4,4,4,4,4,4,4, # 50 - 57 - 4,4,4,4,4,4,4,4, # 58 - 5f - 4,4,4,4,4,4,4,4, # 60 - 67 - 4,4,4,4,4,4,4,4, # 68 - 6f - 4,4,4,4,4,4,4,4, # 70 - 77 - 4,4,4,4,4,4,4,4, # 78 - 7f - 5,5,5,5,5,5,5,5, # 80 - 87 - 5,5,5,5,5,5,1,3, # 88 - 8f - 5,5,5,5,5,5,5,5, # 90 - 97 - 5,5,5,5,5,5,5,5, # 98 - 9f - 5,2,2,2,2,2,2,2, # a0 - a7 - 2,2,2,2,2,2,2,2, # a8 - af - 2,2,2,2,2,2,2,2, # b0 - b7 - 2,2,2,2,2,2,2,2, # b8 - bf - 2,2,2,2,2,2,2,2, # c0 - c7 - 2,2,2,2,2,2,2,2, # c8 - cf - 2,2,2,2,2,2,2,2, # d0 - d7 - 2,2,2,2,2,2,2,2, # d8 - df - 0,0,0,0,0,0,0,0, # e0 - e7 - 0,0,0,0,0,0,0,0, # e8 - ef - 0,0,0,0,0,0,0,0, # f0 - f7 - 0,0,0,0,0,0,0,5 # f8 - ff + 4, 4, 4, 4, 4, 4, 4, 4, # 00 - 07 + 4, 4, 4, 4, 4, 4, 5, 5, # 08 - 0f + 4, 4, 4, 4, 4, 4, 4, 4, # 10 - 17 + 4, 4, 4, 5, 4, 4, 4, 4, # 18 - 1f + 4, 4, 4, 4, 4, 4, 4, 4, # 20 - 27 + 4, 4, 4, 4, 4, 4, 4, 4, # 28 - 2f + 4, 4, 4, 4, 4, 4, 4, 4, # 30 - 37 + 4, 4, 4, 4, 4, 4, 4, 4, # 38 - 3f + 4, 4, 4, 4, 4, 4, 4, 4, # 40 - 47 + 4, 4, 4, 4, 4, 4, 4, 4, # 48 - 4f + 4, 4, 4, 4, 4, 4, 4, 4, # 50 - 57 + 4, 4, 4, 4, 4, 4, 4, 4, # 58 - 5f + 4, 4, 4, 4, 4, 4, 4, 4, # 60 - 67 + 4, 4, 4, 4, 4, 4, 4, 4, # 68 - 6f + 4, 4, 4, 4, 4, 4, 4, 4, # 70 - 77 + 4, 4, 4, 4, 4, 4, 4, 4, # 78 - 7f + 5, 5, 5, 5, 5, 5, 5, 5, # 80 - 87 + 5, 5, 5, 5, 5, 5, 1, 3, # 88 - 8f + 5, 5, 5, 5, 5, 5, 5, 5, # 90 - 97 + 5, 5, 5, 5, 5, 5, 5, 5, # 98 - 9f + 5, 2, 2, 2, 2, 2, 2, 2, # a0 - a7 + 2, 2, 2, 2, 2, 2, 2, 2, # a8 - af + 2, 2, 2, 2, 2, 2, 2, 2, # b0 - b7 + 2, 2, 2, 2, 2, 2, 2, 2, # b8 - bf + 2, 2, 2, 2, 2, 2, 2, 2, # c0 - c7 + 2, 2, 2, 2, 2, 2, 2, 2, # c8 - cf + 2, 2, 2, 2, 2, 2, 2, 2, # d0 - d7 + 2, 2, 2, 2, 2, 2, 2, 2, # d8 - df + 0, 0, 0, 0, 0, 0, 0, 0, # e0 - e7 + 0, 0, 0, 0, 0, 0, 0, 0, # e8 - ef + 0, 0, 0, 0, 0, 0, 0, 0, # f0 - f7 + 0, 0, 0, 0, 0, 0, 0, 5 # f8 - ff ) EUCJP_ST = ( @@ -162,100 +170,163 @@ EUCJP_ST = ( MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 3,MachineState.ERROR,#18-1f 
3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START#20-27 ) +# fmt: on EUCJP_CHAR_LEN_TABLE = (2, 2, 2, 3, 1, 0) -EUCJP_SM_MODEL = {'class_table': EUCJP_CLS, - 'class_factor': 6, - 'state_table': EUCJP_ST, - 'char_len_table': EUCJP_CHAR_LEN_TABLE, - 'name': 'EUC-JP'} +EUCJP_SM_MODEL: CodingStateMachineDict = { + "class_table": EUCJP_CLS, + "class_factor": 6, + "state_table": EUCJP_ST, + "char_len_table": EUCJP_CHAR_LEN_TABLE, + "name": "EUC-JP", +} # EUC-KR - +# fmt: off EUCKR_CLS = ( - 1,1,1,1,1,1,1,1, # 00 - 07 - 1,1,1,1,1,1,0,0, # 08 - 0f - 1,1,1,1,1,1,1,1, # 10 - 17 - 1,1,1,0,1,1,1,1, # 18 - 1f - 1,1,1,1,1,1,1,1, # 20 - 27 - 1,1,1,1,1,1,1,1, # 28 - 2f - 1,1,1,1,1,1,1,1, # 30 - 37 - 1,1,1,1,1,1,1,1, # 38 - 3f - 1,1,1,1,1,1,1,1, # 40 - 47 + 1, 1, 1, 1, 1, 1, 1, 1, # 00 - 07 + 1, 1, 1, 1, 1, 1, 0, 0, # 08 - 0f + 1, 1, 1, 1, 1, 1, 1, 1, # 10 - 17 + 1, 1, 1, 0, 1, 1, 1, 1, # 18 - 1f + 1, 1, 1, 1, 1, 1, 1, 1, # 20 - 27 + 1, 1, 1, 1, 1, 1, 1, 1, # 28 - 2f + 1, 1, 1, 1, 1, 1, 1, 1, # 30 - 37 + 1, 1, 1, 1, 1, 1, 1, 1, # 38 - 3f + 1, 1, 1, 1, 1, 1, 1, 1, # 40 - 47 + 1, 1, 1, 1, 1, 1, 1, 1, # 48 - 4f + 1, 1, 1, 1, 1, 1, 1, 1, # 50 - 57 + 1, 1, 1, 1, 1, 1, 1, 1, # 58 - 5f + 1, 1, 1, 1, 1, 1, 1, 1, # 60 - 67 + 1, 1, 1, 1, 1, 1, 1, 1, # 68 - 6f + 1, 1, 1, 1, 1, 1, 1, 1, # 70 - 77 + 1, 1, 1, 1, 1, 1, 1, 1, # 78 - 7f + 0, 0, 0, 0, 0, 0, 0, 0, # 80 - 87 + 0, 0, 0, 0, 0, 0, 0, 0, # 88 - 8f + 0, 0, 0, 0, 0, 0, 0, 0, # 90 - 97 + 0, 0, 0, 0, 0, 0, 0, 0, # 98 - 9f + 0, 2, 2, 2, 2, 2, 2, 2, # a0 - a7 + 2, 2, 2, 2, 2, 3, 3, 3, # a8 - af + 2, 2, 2, 2, 2, 2, 2, 2, # b0 - b7 + 2, 2, 2, 2, 2, 2, 2, 2, # b8 - bf + 2, 2, 2, 2, 2, 2, 2, 2, # c0 - c7 + 2, 3, 2, 2, 2, 2, 2, 2, # c8 - cf + 2, 2, 2, 2, 2, 2, 2, 2, # d0 - d7 + 2, 2, 2, 2, 2, 2, 2, 2, # d8 - df + 2, 2, 2, 2, 2, 2, 2, 2, # e0 - e7 + 2, 2, 2, 2, 2, 2, 2, 2, # e8 - ef + 2, 2, 2, 2, 2, 2, 2, 2, # f0 - f7 + 2, 2, 2, 2, 2, 2, 2, 0 # f8 - ff +) + +EUCKR_ST = ( + MachineState.ERROR,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07 + MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #08-0f +) +# fmt: on + +EUCKR_CHAR_LEN_TABLE = (0, 1, 2, 0) + +EUCKR_SM_MODEL: CodingStateMachineDict = { + "class_table": EUCKR_CLS, + "class_factor": 4, + "state_table": EUCKR_ST, + "char_len_table": EUCKR_CHAR_LEN_TABLE, + "name": "EUC-KR", +} + +# JOHAB +# fmt: off +JOHAB_CLS = ( + 4,4,4,4,4,4,4,4, # 00 - 07 + 4,4,4,4,4,4,0,0, # 08 - 0f + 4,4,4,4,4,4,4,4, # 10 - 17 + 4,4,4,0,4,4,4,4, # 18 - 1f + 4,4,4,4,4,4,4,4, # 20 - 27 + 4,4,4,4,4,4,4,4, # 28 - 2f + 4,3,3,3,3,3,3,3, # 30 - 37 + 3,3,3,3,3,3,3,3, # 38 - 3f + 3,1,1,1,1,1,1,1, # 40 - 47 1,1,1,1,1,1,1,1, # 48 - 4f 1,1,1,1,1,1,1,1, # 50 - 57 1,1,1,1,1,1,1,1, # 58 - 5f 1,1,1,1,1,1,1,1, # 60 - 67 1,1,1,1,1,1,1,1, # 68 - 6f 1,1,1,1,1,1,1,1, # 70 - 77 - 1,1,1,1,1,1,1,1, # 78 - 7f - 0,0,0,0,0,0,0,0, # 80 - 87 - 0,0,0,0,0,0,0,0, # 88 - 8f - 0,0,0,0,0,0,0,0, # 90 - 97 - 0,0,0,0,0,0,0,0, # 98 - 9f - 0,2,2,2,2,2,2,2, # a0 - a7 - 2,2,2,2,2,3,3,3, # a8 - af - 2,2,2,2,2,2,2,2, # b0 - b7 - 2,2,2,2,2,2,2,2, # b8 - bf - 2,2,2,2,2,2,2,2, # c0 - c7 - 2,3,2,2,2,2,2,2, # c8 - cf - 2,2,2,2,2,2,2,2, # d0 - d7 - 2,2,2,2,2,2,2,2, # d8 - df - 2,2,2,2,2,2,2,2, # e0 - e7 - 2,2,2,2,2,2,2,2, # e8 - ef - 2,2,2,2,2,2,2,2, # f0 - f7 - 2,2,2,2,2,2,2,0 # f8 - ff + 1,1,1,1,1,1,1,2, # 78 - 7f + 6,6,6,6,8,8,8,8, 
# 80 - 87 + 8,8,8,8,8,8,8,8, # 88 - 8f + 8,7,7,7,7,7,7,7, # 90 - 97 + 7,7,7,7,7,7,7,7, # 98 - 9f + 7,7,7,7,7,7,7,7, # a0 - a7 + 7,7,7,7,7,7,7,7, # a8 - af + 7,7,7,7,7,7,7,7, # b0 - b7 + 7,7,7,7,7,7,7,7, # b8 - bf + 7,7,7,7,7,7,7,7, # c0 - c7 + 7,7,7,7,7,7,7,7, # c8 - cf + 7,7,7,7,5,5,5,5, # d0 - d7 + 5,9,9,9,9,9,9,5, # d8 - df + 9,9,9,9,9,9,9,9, # e0 - e7 + 9,9,9,9,9,9,9,9, # e8 - ef + 9,9,9,9,9,9,9,9, # f0 - f7 + 9,9,5,5,5,5,5,0 # f8 - ff ) -EUCKR_ST = ( - MachineState.ERROR,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07 - MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #08-0f +JOHAB_ST = ( +# cls = 0 1 2 3 4 5 6 7 8 9 + MachineState.ERROR ,MachineState.START ,MachineState.START ,MachineState.START ,MachineState.START ,MachineState.ERROR ,MachineState.ERROR ,3 ,3 ,4 , # MachineState.START + MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME, # MachineState.ITS_ME + MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR , # MachineState.ERROR + MachineState.ERROR ,MachineState.START ,MachineState.START ,MachineState.ERROR ,MachineState.ERROR ,MachineState.START ,MachineState.START ,MachineState.START ,MachineState.START ,MachineState.START , # 3 + MachineState.ERROR ,MachineState.START ,MachineState.ERROR ,MachineState.START ,MachineState.ERROR ,MachineState.START ,MachineState.ERROR ,MachineState.START ,MachineState.ERROR ,MachineState.START , # 4 ) +# fmt: on -EUCKR_CHAR_LEN_TABLE = (0, 1, 2, 0) +JOHAB_CHAR_LEN_TABLE = (0, 1, 1, 1, 1, 0, 0, 2, 2, 2) -EUCKR_SM_MODEL = {'class_table': EUCKR_CLS, - 'class_factor': 4, - 'state_table': EUCKR_ST, - 'char_len_table': EUCKR_CHAR_LEN_TABLE, - 'name': 'EUC-KR'} +JOHAB_SM_MODEL: CodingStateMachineDict = { + "class_table": JOHAB_CLS, + "class_factor": 10, + "state_table": JOHAB_ST, + "char_len_table": JOHAB_CHAR_LEN_TABLE, + "name": "Johab", +} # EUC-TW - +# fmt: off EUCTW_CLS = ( - 2,2,2,2,2,2,2,2, # 00 - 07 - 2,2,2,2,2,2,0,0, # 08 - 0f - 2,2,2,2,2,2,2,2, # 10 - 17 - 2,2,2,0,2,2,2,2, # 18 - 1f - 2,2,2,2,2,2,2,2, # 20 - 27 - 2,2,2,2,2,2,2,2, # 28 - 2f - 2,2,2,2,2,2,2,2, # 30 - 37 - 2,2,2,2,2,2,2,2, # 38 - 3f - 2,2,2,2,2,2,2,2, # 40 - 47 - 2,2,2,2,2,2,2,2, # 48 - 4f - 2,2,2,2,2,2,2,2, # 50 - 57 - 2,2,2,2,2,2,2,2, # 58 - 5f - 2,2,2,2,2,2,2,2, # 60 - 67 - 2,2,2,2,2,2,2,2, # 68 - 6f - 2,2,2,2,2,2,2,2, # 70 - 77 - 2,2,2,2,2,2,2,2, # 78 - 7f - 0,0,0,0,0,0,0,0, # 80 - 87 - 0,0,0,0,0,0,6,0, # 88 - 8f - 0,0,0,0,0,0,0,0, # 90 - 97 - 0,0,0,0,0,0,0,0, # 98 - 9f - 0,3,4,4,4,4,4,4, # a0 - a7 - 5,5,1,1,1,1,1,1, # a8 - af - 1,1,1,1,1,1,1,1, # b0 - b7 - 1,1,1,1,1,1,1,1, # b8 - bf - 1,1,3,1,3,3,3,3, # c0 - c7 - 3,3,3,3,3,3,3,3, # c8 - cf - 3,3,3,3,3,3,3,3, # d0 - d7 - 3,3,3,3,3,3,3,3, # d8 - df - 3,3,3,3,3,3,3,3, # e0 - e7 - 3,3,3,3,3,3,3,3, # e8 - ef - 3,3,3,3,3,3,3,3, # f0 - f7 - 3,3,3,3,3,3,3,0 # f8 - ff + 2, 2, 2, 2, 2, 2, 2, 2, # 00 - 07 + 2, 2, 2, 2, 2, 2, 0, 0, # 08 - 0f + 2, 2, 2, 2, 2, 2, 2, 2, # 10 - 17 + 2, 2, 2, 0, 2, 2, 2, 2, # 18 - 1f + 2, 2, 2, 2, 2, 2, 2, 2, # 20 - 27 + 2, 2, 2, 2, 2, 2, 2, 2, # 28 - 2f + 2, 2, 2, 2, 2, 2, 2, 2, # 30 - 37 + 2, 2, 2, 2, 2, 2, 2, 2, # 38 - 3f + 2, 2, 
2, 2, 2, 2, 2, 2, # 40 - 47 + 2, 2, 2, 2, 2, 2, 2, 2, # 48 - 4f + 2, 2, 2, 2, 2, 2, 2, 2, # 50 - 57 + 2, 2, 2, 2, 2, 2, 2, 2, # 58 - 5f + 2, 2, 2, 2, 2, 2, 2, 2, # 60 - 67 + 2, 2, 2, 2, 2, 2, 2, 2, # 68 - 6f + 2, 2, 2, 2, 2, 2, 2, 2, # 70 - 77 + 2, 2, 2, 2, 2, 2, 2, 2, # 78 - 7f + 0, 0, 0, 0, 0, 0, 0, 0, # 80 - 87 + 0, 0, 0, 0, 0, 0, 6, 0, # 88 - 8f + 0, 0, 0, 0, 0, 0, 0, 0, # 90 - 97 + 0, 0, 0, 0, 0, 0, 0, 0, # 98 - 9f + 0, 3, 4, 4, 4, 4, 4, 4, # a0 - a7 + 5, 5, 1, 1, 1, 1, 1, 1, # a8 - af + 1, 1, 1, 1, 1, 1, 1, 1, # b0 - b7 + 1, 1, 1, 1, 1, 1, 1, 1, # b8 - bf + 1, 1, 3, 1, 3, 3, 3, 3, # c0 - c7 + 3, 3, 3, 3, 3, 3, 3, 3, # c8 - cf + 3, 3, 3, 3, 3, 3, 3, 3, # d0 - d7 + 3, 3, 3, 3, 3, 3, 3, 3, # d8 - df + 3, 3, 3, 3, 3, 3, 3, 3, # e0 - e7 + 3, 3, 3, 3, 3, 3, 3, 3, # e8 - ef + 3, 3, 3, 3, 3, 3, 3, 3, # f0 - f7 + 3, 3, 3, 3, 3, 3, 3, 0 # f8 - ff ) EUCTW_ST = ( @@ -266,50 +337,53 @@ EUCTW_ST = ( 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,#20-27 MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f ) +# fmt: on EUCTW_CHAR_LEN_TABLE = (0, 0, 1, 2, 2, 2, 3) -EUCTW_SM_MODEL = {'class_table': EUCTW_CLS, - 'class_factor': 7, - 'state_table': EUCTW_ST, - 'char_len_table': EUCTW_CHAR_LEN_TABLE, - 'name': 'x-euc-tw'} +EUCTW_SM_MODEL: CodingStateMachineDict = { + "class_table": EUCTW_CLS, + "class_factor": 7, + "state_table": EUCTW_ST, + "char_len_table": EUCTW_CHAR_LEN_TABLE, + "name": "x-euc-tw", +} # GB2312 - +# fmt: off GB2312_CLS = ( - 1,1,1,1,1,1,1,1, # 00 - 07 - 1,1,1,1,1,1,0,0, # 08 - 0f - 1,1,1,1,1,1,1,1, # 10 - 17 - 1,1,1,0,1,1,1,1, # 18 - 1f - 1,1,1,1,1,1,1,1, # 20 - 27 - 1,1,1,1,1,1,1,1, # 28 - 2f - 3,3,3,3,3,3,3,3, # 30 - 37 - 3,3,1,1,1,1,1,1, # 38 - 3f - 2,2,2,2,2,2,2,2, # 40 - 47 - 2,2,2,2,2,2,2,2, # 48 - 4f - 2,2,2,2,2,2,2,2, # 50 - 57 - 2,2,2,2,2,2,2,2, # 58 - 5f - 2,2,2,2,2,2,2,2, # 60 - 67 - 2,2,2,2,2,2,2,2, # 68 - 6f - 2,2,2,2,2,2,2,2, # 70 - 77 - 2,2,2,2,2,2,2,4, # 78 - 7f - 5,6,6,6,6,6,6,6, # 80 - 87 - 6,6,6,6,6,6,6,6, # 88 - 8f - 6,6,6,6,6,6,6,6, # 90 - 97 - 6,6,6,6,6,6,6,6, # 98 - 9f - 6,6,6,6,6,6,6,6, # a0 - a7 - 6,6,6,6,6,6,6,6, # a8 - af - 6,6,6,6,6,6,6,6, # b0 - b7 - 6,6,6,6,6,6,6,6, # b8 - bf - 6,6,6,6,6,6,6,6, # c0 - c7 - 6,6,6,6,6,6,6,6, # c8 - cf - 6,6,6,6,6,6,6,6, # d0 - d7 - 6,6,6,6,6,6,6,6, # d8 - df - 6,6,6,6,6,6,6,6, # e0 - e7 - 6,6,6,6,6,6,6,6, # e8 - ef - 6,6,6,6,6,6,6,6, # f0 - f7 - 6,6,6,6,6,6,6,0 # f8 - ff + 1, 1, 1, 1, 1, 1, 1, 1, # 00 - 07 + 1, 1, 1, 1, 1, 1, 0, 0, # 08 - 0f + 1, 1, 1, 1, 1, 1, 1, 1, # 10 - 17 + 1, 1, 1, 0, 1, 1, 1, 1, # 18 - 1f + 1, 1, 1, 1, 1, 1, 1, 1, # 20 - 27 + 1, 1, 1, 1, 1, 1, 1, 1, # 28 - 2f + 3, 3, 3, 3, 3, 3, 3, 3, # 30 - 37 + 3, 3, 1, 1, 1, 1, 1, 1, # 38 - 3f + 2, 2, 2, 2, 2, 2, 2, 2, # 40 - 47 + 2, 2, 2, 2, 2, 2, 2, 2, # 48 - 4f + 2, 2, 2, 2, 2, 2, 2, 2, # 50 - 57 + 2, 2, 2, 2, 2, 2, 2, 2, # 58 - 5f + 2, 2, 2, 2, 2, 2, 2, 2, # 60 - 67 + 2, 2, 2, 2, 2, 2, 2, 2, # 68 - 6f + 2, 2, 2, 2, 2, 2, 2, 2, # 70 - 77 + 2, 2, 2, 2, 2, 2, 2, 4, # 78 - 7f + 5, 6, 6, 6, 6, 6, 6, 6, # 80 - 87 + 6, 6, 6, 6, 6, 6, 6, 6, # 88 - 8f + 6, 6, 6, 6, 6, 6, 6, 6, # 90 - 97 + 6, 6, 6, 6, 6, 6, 6, 6, # 98 - 9f + 6, 6, 6, 6, 6, 6, 6, 6, # a0 - a7 + 6, 6, 6, 6, 6, 6, 6, 6, # a8 - af + 6, 6, 6, 6, 6, 6, 6, 6, # b0 - b7 + 6, 6, 6, 6, 6, 6, 6, 6, # b8 - bf + 6, 6, 6, 6, 6, 6, 6, 6, # c0 - c7 + 6, 6, 6, 6, 6, 6, 6, 6, # c8 - cf + 6, 6, 6, 6, 6, 6, 6, 6, # d0 - d7 + 6, 6, 
6, 6, 6, 6, 6, 6, # d8 - df + 6, 6, 6, 6, 6, 6, 6, 6, # e0 - e7 + 6, 6, 6, 6, 6, 6, 6, 6, # e8 - ef + 6, 6, 6, 6, 6, 6, 6, 6, # f0 - f7 + 6, 6, 6, 6, 6, 6, 6, 0 # f8 - ff ) GB2312_ST = ( @@ -320,6 +394,7 @@ GB2312_ST = ( MachineState.ERROR,MachineState.ERROR, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,#20-27 MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f ) +# fmt: on # To be accurate, the length of class 6 can be either 2 or 4. # But it is not necessary to discriminate between the two since @@ -328,100 +403,105 @@ GB2312_ST = ( # 2 here. GB2312_CHAR_LEN_TABLE = (0, 1, 1, 1, 1, 1, 2) -GB2312_SM_MODEL = {'class_table': GB2312_CLS, - 'class_factor': 7, - 'state_table': GB2312_ST, - 'char_len_table': GB2312_CHAR_LEN_TABLE, - 'name': 'GB2312'} +GB2312_SM_MODEL: CodingStateMachineDict = { + "class_table": GB2312_CLS, + "class_factor": 7, + "state_table": GB2312_ST, + "char_len_table": GB2312_CHAR_LEN_TABLE, + "name": "GB2312", +} # Shift_JIS - +# fmt: off SJIS_CLS = ( - 1,1,1,1,1,1,1,1, # 00 - 07 - 1,1,1,1,1,1,0,0, # 08 - 0f - 1,1,1,1,1,1,1,1, # 10 - 17 - 1,1,1,0,1,1,1,1, # 18 - 1f - 1,1,1,1,1,1,1,1, # 20 - 27 - 1,1,1,1,1,1,1,1, # 28 - 2f - 1,1,1,1,1,1,1,1, # 30 - 37 - 1,1,1,1,1,1,1,1, # 38 - 3f - 2,2,2,2,2,2,2,2, # 40 - 47 - 2,2,2,2,2,2,2,2, # 48 - 4f - 2,2,2,2,2,2,2,2, # 50 - 57 - 2,2,2,2,2,2,2,2, # 58 - 5f - 2,2,2,2,2,2,2,2, # 60 - 67 - 2,2,2,2,2,2,2,2, # 68 - 6f - 2,2,2,2,2,2,2,2, # 70 - 77 - 2,2,2,2,2,2,2,1, # 78 - 7f - 3,3,3,3,3,2,2,3, # 80 - 87 - 3,3,3,3,3,3,3,3, # 88 - 8f - 3,3,3,3,3,3,3,3, # 90 - 97 - 3,3,3,3,3,3,3,3, # 98 - 9f + 1, 1, 1, 1, 1, 1, 1, 1, # 00 - 07 + 1, 1, 1, 1, 1, 1, 0, 0, # 08 - 0f + 1, 1, 1, 1, 1, 1, 1, 1, # 10 - 17 + 1, 1, 1, 0, 1, 1, 1, 1, # 18 - 1f + 1, 1, 1, 1, 1, 1, 1, 1, # 20 - 27 + 1, 1, 1, 1, 1, 1, 1, 1, # 28 - 2f + 1, 1, 1, 1, 1, 1, 1, 1, # 30 - 37 + 1, 1, 1, 1, 1, 1, 1, 1, # 38 - 3f + 2, 2, 2, 2, 2, 2, 2, 2, # 40 - 47 + 2, 2, 2, 2, 2, 2, 2, 2, # 48 - 4f + 2, 2, 2, 2, 2, 2, 2, 2, # 50 - 57 + 2, 2, 2, 2, 2, 2, 2, 2, # 58 - 5f + 2, 2, 2, 2, 2, 2, 2, 2, # 60 - 67 + 2, 2, 2, 2, 2, 2, 2, 2, # 68 - 6f + 2, 2, 2, 2, 2, 2, 2, 2, # 70 - 77 + 2, 2, 2, 2, 2, 2, 2, 1, # 78 - 7f + 3, 3, 3, 3, 3, 2, 2, 3, # 80 - 87 + 3, 3, 3, 3, 3, 3, 3, 3, # 88 - 8f + 3, 3, 3, 3, 3, 3, 3, 3, # 90 - 97 + 3, 3, 3, 3, 3, 3, 3, 3, # 98 - 9f #0xa0 is illegal in sjis encoding, but some pages do #contain such bytes. We need to be more forgiving of errors.
- 2,2,2,2,2,2,2,2, # a0 - a7 - 2,2,2,2,2,2,2,2, # a8 - af - 2,2,2,2,2,2,2,2, # b0 - b7 - 2,2,2,2,2,2,2,2, # b8 - bf - 2,2,2,2,2,2,2,2, # c0 - c7 - 2,2,2,2,2,2,2,2, # c8 - cf - 2,2,2,2,2,2,2,2, # d0 - d7 - 2,2,2,2,2,2,2,2, # d8 - df - 3,3,3,3,3,3,3,3, # e0 - e7 - 3,3,3,3,3,4,4,4, # e8 - ef - 3,3,3,3,3,3,3,3, # f0 - f7 - 3,3,3,3,3,0,0,0) # f8 - ff - + 2, 2, 2, 2, 2, 2, 2, 2, # a0 - a7 + 2, 2, 2, 2, 2, 2, 2, 2, # a8 - af + 2, 2, 2, 2, 2, 2, 2, 2, # b0 - b7 + 2, 2, 2, 2, 2, 2, 2, 2, # b8 - bf + 2, 2, 2, 2, 2, 2, 2, 2, # c0 - c7 + 2, 2, 2, 2, 2, 2, 2, 2, # c8 - cf + 2, 2, 2, 2, 2, 2, 2, 2, # d0 - d7 + 2, 2, 2, 2, 2, 2, 2, 2, # d8 - df + 3, 3, 3, 3, 3, 3, 3, 3, # e0 - e7 + 3, 3, 3, 3, 3, 4, 4, 4, # e8 - ef + 3, 3, 3, 3, 3, 3, 3, 3, # f0 - f7 + 3, 3, 3, 3, 3, 0, 0, 0, # f8 - ff +) SJIS_ST = ( MachineState.ERROR,MachineState.START,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07 MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START #10-17 ) +# fmt: on SJIS_CHAR_LEN_TABLE = (0, 1, 1, 2, 0, 0) -SJIS_SM_MODEL = {'class_table': SJIS_CLS, - 'class_factor': 6, - 'state_table': SJIS_ST, - 'char_len_table': SJIS_CHAR_LEN_TABLE, - 'name': 'Shift_JIS'} +SJIS_SM_MODEL: CodingStateMachineDict = { + "class_table": SJIS_CLS, + "class_factor": 6, + "state_table": SJIS_ST, + "char_len_table": SJIS_CHAR_LEN_TABLE, + "name": "Shift_JIS", +} # UCS2-BE - +# fmt: off UCS2BE_CLS = ( - 0,0,0,0,0,0,0,0, # 00 - 07 - 0,0,1,0,0,2,0,0, # 08 - 0f - 0,0,0,0,0,0,0,0, # 10 - 17 - 0,0,0,3,0,0,0,0, # 18 - 1f - 0,0,0,0,0,0,0,0, # 20 - 27 - 0,3,3,3,3,3,0,0, # 28 - 2f - 0,0,0,0,0,0,0,0, # 30 - 37 - 0,0,0,0,0,0,0,0, # 38 - 3f - 0,0,0,0,0,0,0,0, # 40 - 47 - 0,0,0,0,0,0,0,0, # 48 - 4f - 0,0,0,0,0,0,0,0, # 50 - 57 - 0,0,0,0,0,0,0,0, # 58 - 5f - 0,0,0,0,0,0,0,0, # 60 - 67 - 0,0,0,0,0,0,0,0, # 68 - 6f - 0,0,0,0,0,0,0,0, # 70 - 77 - 0,0,0,0,0,0,0,0, # 78 - 7f - 0,0,0,0,0,0,0,0, # 80 - 87 - 0,0,0,0,0,0,0,0, # 88 - 8f - 0,0,0,0,0,0,0,0, # 90 - 97 - 0,0,0,0,0,0,0,0, # 98 - 9f - 0,0,0,0,0,0,0,0, # a0 - a7 - 0,0,0,0,0,0,0,0, # a8 - af - 0,0,0,0,0,0,0,0, # b0 - b7 - 0,0,0,0,0,0,0,0, # b8 - bf - 0,0,0,0,0,0,0,0, # c0 - c7 - 0,0,0,0,0,0,0,0, # c8 - cf - 0,0,0,0,0,0,0,0, # d0 - d7 - 0,0,0,0,0,0,0,0, # d8 - df - 0,0,0,0,0,0,0,0, # e0 - e7 - 0,0,0,0,0,0,0,0, # e8 - ef - 0,0,0,0,0,0,0,0, # f0 - f7 - 0,0,0,0,0,0,4,5 # f8 - ff + 0, 0, 0, 0, 0, 0, 0, 0, # 00 - 07 + 0, 0, 1, 0, 0, 2, 0, 0, # 08 - 0f + 0, 0, 0, 0, 0, 0, 0, 0, # 10 - 17 + 0, 0, 0, 3, 0, 0, 0, 0, # 18 - 1f + 0, 0, 0, 0, 0, 0, 0, 0, # 20 - 27 + 0, 3, 3, 3, 3, 3, 0, 0, # 28 - 2f + 0, 0, 0, 0, 0, 0, 0, 0, # 30 - 37 + 0, 0, 0, 0, 0, 0, 0, 0, # 38 - 3f + 0, 0, 0, 0, 0, 0, 0, 0, # 40 - 47 + 0, 0, 0, 0, 0, 0, 0, 0, # 48 - 4f + 0, 0, 0, 0, 0, 0, 0, 0, # 50 - 57 + 0, 0, 0, 0, 0, 0, 0, 0, # 58 - 5f + 0, 0, 0, 0, 0, 0, 0, 0, # 60 - 67 + 0, 0, 0, 0, 0, 0, 0, 0, # 68 - 6f + 0, 0, 0, 0, 0, 0, 0, 0, # 70 - 77 + 0, 0, 0, 0, 0, 0, 0, 0, # 78 - 7f + 0, 0, 0, 0, 0, 0, 0, 0, # 80 - 87 + 0, 0, 0, 0, 0, 0, 0, 0, # 88 - 8f + 0, 0, 0, 0, 0, 0, 0, 0, # 90 - 97 + 0, 0, 0, 0, 0, 0, 0, 0, # 98 - 9f + 0, 0, 0, 0, 0, 0, 0, 0, # a0 - a7 + 0, 0, 0, 0, 0, 0, 0, 0, # a8 - af + 0, 0, 0, 0, 0, 0, 0, 0, # b0 - b7 + 0, 0, 0, 0, 0, 0, 0, 0, # b8 - bf + 0, 0, 0, 0, 0, 0, 0, 0, # c0 - c7 + 0, 0, 0, 0, 0, 0, 0, 0, # c8 - 
cf + 0, 0, 0, 0, 0, 0, 0, 0, # d0 - d7 + 0, 0, 0, 0, 0, 0, 0, 0, # d8 - df + 0, 0, 0, 0, 0, 0, 0, 0, # e0 - e7 + 0, 0, 0, 0, 0, 0, 0, 0, # e8 - ef + 0, 0, 0, 0, 0, 0, 0, 0, # f0 - f7 + 0, 0, 0, 0, 0, 0, 4, 5 # f8 - ff ) UCS2BE_ST = ( @@ -433,50 +513,53 @@ UCS2BE_ST = ( 5, 8, 6, 6,MachineState.ERROR, 6, 6, 6,#28-2f 6, 6, 6, 6,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #30-37 ) +# fmt: on UCS2BE_CHAR_LEN_TABLE = (2, 2, 2, 0, 2, 2) -UCS2BE_SM_MODEL = {'class_table': UCS2BE_CLS, - 'class_factor': 6, - 'state_table': UCS2BE_ST, - 'char_len_table': UCS2BE_CHAR_LEN_TABLE, - 'name': 'UTF-16BE'} +UCS2BE_SM_MODEL: CodingStateMachineDict = { + "class_table": UCS2BE_CLS, + "class_factor": 6, + "state_table": UCS2BE_ST, + "char_len_table": UCS2BE_CHAR_LEN_TABLE, + "name": "UTF-16BE", +} # UCS2-LE - +# fmt: off UCS2LE_CLS = ( - 0,0,0,0,0,0,0,0, # 00 - 07 - 0,0,1,0,0,2,0,0, # 08 - 0f - 0,0,0,0,0,0,0,0, # 10 - 17 - 0,0,0,3,0,0,0,0, # 18 - 1f - 0,0,0,0,0,0,0,0, # 20 - 27 - 0,3,3,3,3,3,0,0, # 28 - 2f - 0,0,0,0,0,0,0,0, # 30 - 37 - 0,0,0,0,0,0,0,0, # 38 - 3f - 0,0,0,0,0,0,0,0, # 40 - 47 - 0,0,0,0,0,0,0,0, # 48 - 4f - 0,0,0,0,0,0,0,0, # 50 - 57 - 0,0,0,0,0,0,0,0, # 58 - 5f - 0,0,0,0,0,0,0,0, # 60 - 67 - 0,0,0,0,0,0,0,0, # 68 - 6f - 0,0,0,0,0,0,0,0, # 70 - 77 - 0,0,0,0,0,0,0,0, # 78 - 7f - 0,0,0,0,0,0,0,0, # 80 - 87 - 0,0,0,0,0,0,0,0, # 88 - 8f - 0,0,0,0,0,0,0,0, # 90 - 97 - 0,0,0,0,0,0,0,0, # 98 - 9f - 0,0,0,0,0,0,0,0, # a0 - a7 - 0,0,0,0,0,0,0,0, # a8 - af - 0,0,0,0,0,0,0,0, # b0 - b7 - 0,0,0,0,0,0,0,0, # b8 - bf - 0,0,0,0,0,0,0,0, # c0 - c7 - 0,0,0,0,0,0,0,0, # c8 - cf - 0,0,0,0,0,0,0,0, # d0 - d7 - 0,0,0,0,0,0,0,0, # d8 - df - 0,0,0,0,0,0,0,0, # e0 - e7 - 0,0,0,0,0,0,0,0, # e8 - ef - 0,0,0,0,0,0,0,0, # f0 - f7 - 0,0,0,0,0,0,4,5 # f8 - ff + 0, 0, 0, 0, 0, 0, 0, 0, # 00 - 07 + 0, 0, 1, 0, 0, 2, 0, 0, # 08 - 0f + 0, 0, 0, 0, 0, 0, 0, 0, # 10 - 17 + 0, 0, 0, 3, 0, 0, 0, 0, # 18 - 1f + 0, 0, 0, 0, 0, 0, 0, 0, # 20 - 27 + 0, 3, 3, 3, 3, 3, 0, 0, # 28 - 2f + 0, 0, 0, 0, 0, 0, 0, 0, # 30 - 37 + 0, 0, 0, 0, 0, 0, 0, 0, # 38 - 3f + 0, 0, 0, 0, 0, 0, 0, 0, # 40 - 47 + 0, 0, 0, 0, 0, 0, 0, 0, # 48 - 4f + 0, 0, 0, 0, 0, 0, 0, 0, # 50 - 57 + 0, 0, 0, 0, 0, 0, 0, 0, # 58 - 5f + 0, 0, 0, 0, 0, 0, 0, 0, # 60 - 67 + 0, 0, 0, 0, 0, 0, 0, 0, # 68 - 6f + 0, 0, 0, 0, 0, 0, 0, 0, # 70 - 77 + 0, 0, 0, 0, 0, 0, 0, 0, # 78 - 7f + 0, 0, 0, 0, 0, 0, 0, 0, # 80 - 87 + 0, 0, 0, 0, 0, 0, 0, 0, # 88 - 8f + 0, 0, 0, 0, 0, 0, 0, 0, # 90 - 97 + 0, 0, 0, 0, 0, 0, 0, 0, # 98 - 9f + 0, 0, 0, 0, 0, 0, 0, 0, # a0 - a7 + 0, 0, 0, 0, 0, 0, 0, 0, # a8 - af + 0, 0, 0, 0, 0, 0, 0, 0, # b0 - b7 + 0, 0, 0, 0, 0, 0, 0, 0, # b8 - bf + 0, 0, 0, 0, 0, 0, 0, 0, # c0 - c7 + 0, 0, 0, 0, 0, 0, 0, 0, # c8 - cf + 0, 0, 0, 0, 0, 0, 0, 0, # d0 - d7 + 0, 0, 0, 0, 0, 0, 0, 0, # d8 - df + 0, 0, 0, 0, 0, 0, 0, 0, # e0 - e7 + 0, 0, 0, 0, 0, 0, 0, 0, # e8 - ef + 0, 0, 0, 0, 0, 0, 0, 0, # f0 - f7 + 0, 0, 0, 0, 0, 0, 4, 5 # f8 - ff ) UCS2LE_ST = ( @@ -488,50 +571,53 @@ UCS2LE_ST = ( 5, 5, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5, 5,#28-2f 5, 5, 5,MachineState.ERROR, 5,MachineState.ERROR,MachineState.START,MachineState.START #30-37 ) +# fmt: on UCS2LE_CHAR_LEN_TABLE = (2, 2, 2, 2, 2, 2) -UCS2LE_SM_MODEL = {'class_table': UCS2LE_CLS, - 'class_factor': 6, - 'state_table': UCS2LE_ST, - 'char_len_table': UCS2LE_CHAR_LEN_TABLE, - 'name': 'UTF-16LE'} +UCS2LE_SM_MODEL: CodingStateMachineDict = { + "class_table": UCS2LE_CLS, + "class_factor": 6, + "state_table": UCS2LE_ST, + "char_len_table": UCS2LE_CHAR_LEN_TABLE, + "name": 
"UTF-16LE", +} # UTF-8 - +# fmt: off UTF8_CLS = ( - 1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as a legal value - 1,1,1,1,1,1,0,0, # 08 - 0f - 1,1,1,1,1,1,1,1, # 10 - 17 - 1,1,1,0,1,1,1,1, # 18 - 1f - 1,1,1,1,1,1,1,1, # 20 - 27 - 1,1,1,1,1,1,1,1, # 28 - 2f - 1,1,1,1,1,1,1,1, # 30 - 37 - 1,1,1,1,1,1,1,1, # 38 - 3f - 1,1,1,1,1,1,1,1, # 40 - 47 - 1,1,1,1,1,1,1,1, # 48 - 4f - 1,1,1,1,1,1,1,1, # 50 - 57 - 1,1,1,1,1,1,1,1, # 58 - 5f - 1,1,1,1,1,1,1,1, # 60 - 67 - 1,1,1,1,1,1,1,1, # 68 - 6f - 1,1,1,1,1,1,1,1, # 70 - 77 - 1,1,1,1,1,1,1,1, # 78 - 7f - 2,2,2,2,3,3,3,3, # 80 - 87 - 4,4,4,4,4,4,4,4, # 88 - 8f - 4,4,4,4,4,4,4,4, # 90 - 97 - 4,4,4,4,4,4,4,4, # 98 - 9f - 5,5,5,5,5,5,5,5, # a0 - a7 - 5,5,5,5,5,5,5,5, # a8 - af - 5,5,5,5,5,5,5,5, # b0 - b7 - 5,5,5,5,5,5,5,5, # b8 - bf - 0,0,6,6,6,6,6,6, # c0 - c7 - 6,6,6,6,6,6,6,6, # c8 - cf - 6,6,6,6,6,6,6,6, # d0 - d7 - 6,6,6,6,6,6,6,6, # d8 - df - 7,8,8,8,8,8,8,8, # e0 - e7 - 8,8,8,8,8,9,8,8, # e8 - ef - 10,11,11,11,11,11,11,11, # f0 - f7 - 12,13,13,13,14,15,0,0 # f8 - ff + 1, 1, 1, 1, 1, 1, 1, 1, # 00 - 07 #allow 0x00 as a legal value + 1, 1, 1, 1, 1, 1, 0, 0, # 08 - 0f + 1, 1, 1, 1, 1, 1, 1, 1, # 10 - 17 + 1, 1, 1, 0, 1, 1, 1, 1, # 18 - 1f + 1, 1, 1, 1, 1, 1, 1, 1, # 20 - 27 + 1, 1, 1, 1, 1, 1, 1, 1, # 28 - 2f + 1, 1, 1, 1, 1, 1, 1, 1, # 30 - 37 + 1, 1, 1, 1, 1, 1, 1, 1, # 38 - 3f + 1, 1, 1, 1, 1, 1, 1, 1, # 40 - 47 + 1, 1, 1, 1, 1, 1, 1, 1, # 48 - 4f + 1, 1, 1, 1, 1, 1, 1, 1, # 50 - 57 + 1, 1, 1, 1, 1, 1, 1, 1, # 58 - 5f + 1, 1, 1, 1, 1, 1, 1, 1, # 60 - 67 + 1, 1, 1, 1, 1, 1, 1, 1, # 68 - 6f + 1, 1, 1, 1, 1, 1, 1, 1, # 70 - 77 + 1, 1, 1, 1, 1, 1, 1, 1, # 78 - 7f + 2, 2, 2, 2, 3, 3, 3, 3, # 80 - 87 + 4, 4, 4, 4, 4, 4, 4, 4, # 88 - 8f + 4, 4, 4, 4, 4, 4, 4, 4, # 90 - 97 + 4, 4, 4, 4, 4, 4, 4, 4, # 98 - 9f + 5, 5, 5, 5, 5, 5, 5, 5, # a0 - a7 + 5, 5, 5, 5, 5, 5, 5, 5, # a8 - af + 5, 5, 5, 5, 5, 5, 5, 5, # b0 - b7 + 5, 5, 5, 5, 5, 5, 5, 5, # b8 - bf + 0, 0, 6, 6, 6, 6, 6, 6, # c0 - c7 + 6, 6, 6, 6, 6, 6, 6, 6, # c8 - cf + 6, 6, 6, 6, 6, 6, 6, 6, # d0 - d7 + 6, 6, 6, 6, 6, 6, 6, 6, # d8 - df + 7, 8, 8, 8, 8, 8, 8, 8, # e0 - e7 + 8, 8, 8, 8, 8, 9, 8, 8, # e8 - ef + 10, 11, 11, 11, 11, 11, 11, 11, # f0 - f7 + 12, 13, 13, 13, 14, 15, 0, 0 # f8 - ff ) UTF8_ST = ( @@ -562,11 +648,14 @@ UTF8_ST = ( MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,#c0-c7 MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR #c8-cf ) +# fmt: on UTF8_CHAR_LEN_TABLE = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6) -UTF8_SM_MODEL = {'class_table': UTF8_CLS, - 'class_factor': 16, - 'state_table': UTF8_ST, - 'char_len_table': UTF8_CHAR_LEN_TABLE, - 'name': 'UTF-8'} +UTF8_SM_MODEL: CodingStateMachineDict = { + "class_table": UTF8_CLS, + "class_factor": 16, + "state_table": UTF8_ST, + "char_len_table": UTF8_CHAR_LEN_TABLE, + "name": "UTF-8", +} diff --git a/env/Lib/site-packages/pip/_vendor/chardet/metadata/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/chardet/metadata/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index f20fa90dfbfd044f2ed07632e8c63551c1f68955..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/metadata/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/chardet/metadata/__pycache__/languages.cpython-39.pyc 
b/env/Lib/site-packages/pip/_vendor/chardet/metadata/__pycache__/languages.cpython-39.pyc deleted file mode 100644 index 89a848bfcfb9b4e13f7f563f9c97ae182615ec54..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/chardet/metadata/__pycache__/languages.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/chardet/metadata/languages.py b/env/Lib/site-packages/pip/_vendor/chardet/metadata/languages.py index 3237d5abf60122e0cea5463ff34f2256b11b5a81..eb40c5f0c8526208d434d762855d23079dc68b36 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/metadata/languages.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/metadata/languages.py @@ -1,19 +1,17 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- """ Metadata about languages used by our model training code for our SingleByteCharSetProbers. Could be used for other things in the future. This code is based on the language metadata from the uchardet project. """ -from __future__ import absolute_import, print_function from string import ascii_letters +from typing import List, Optional +# TODO: Add Ukrainian (KOI8-U) -# TODO: Add Ukranian (KOI8-U) -class Language(object): +class Language: """Metadata about a language useful for training models :ivar name: The human name for the language, in English. @@ -33,9 +31,17 @@ class Language(object): Wikipedia for training data. :type wiki_start_pages: list of str """ - def __init__(self, name=None, iso_code=None, use_ascii=True, charsets=None, - alphabet=None, wiki_start_pages=None): - super(Language, self).__init__() + + def __init__( + self, + name: Optional[str] = None, + iso_code: Optional[str] = None, + use_ascii: bool = True, + charsets: Optional[List[str]] = None, + alphabet: Optional[str] = None, + wiki_start_pages: Optional[List[str]] = None, + ) -> None: + super().__init__() self.name = name self.iso_code = iso_code self.use_ascii = use_ascii @@ -46,265 +52,301 @@ class Language(object): else: alphabet = ascii_letters elif not alphabet: - raise ValueError('Must supply alphabet if use_ascii is False') - self.alphabet = ''.join(sorted(set(alphabet))) if alphabet else None + raise ValueError("Must supply alphabet if use_ascii is False") + self.alphabet = "".join(sorted(set(alphabet))) if alphabet else None self.wiki_start_pages = wiki_start_pages - def __repr__(self): - return '{}({})'.format(self.__class__.__name__, - ', '.join('{}={!r}'.format(k, v) - for k, v in self.__dict__.items() - if not k.startswith('_'))) + def __repr__(self) -> str: + param_str = ", ".join( + f"{k}={v!r}" for k, v in self.__dict__.items() if not k.startswith("_") + ) + return f"{self.__class__.__name__}({param_str})" -LANGUAGES = {'Arabic': Language(name='Arabic', - iso_code='ar', - use_ascii=False, - # We only support encodings that use isolated - # forms, because the current recommendation is - # that the rendering system handles presentation - # forms. This means we purposefully skip IBM864. 
- charsets=['ISO-8859-6', 'WINDOWS-1256', - 'CP720', 'CP864'], - alphabet=u'ءآأؤإئابةتثجحخدذرزسشصضطظعغػؼؽؾؿـفقكلمنهوىيًٌٍَُِّ', - wiki_start_pages=[u'الصفحة_الرئيسية']), - 'Belarusian': Language(name='Belarusian', - iso_code='be', - use_ascii=False, - charsets=['ISO-8859-5', 'WINDOWS-1251', - 'IBM866', 'MacCyrillic'], - alphabet=(u'АБВГДЕЁЖЗІЙКЛМНОПРСТУЎФХЦЧШЫЬЭЮЯ' - u'абвгдеёжзійклмнопрстуўфхцчшыьэюяʼ'), - wiki_start_pages=[u'Галоўная_старонка']), - 'Bulgarian': Language(name='Bulgarian', - iso_code='bg', - use_ascii=False, - charsets=['ISO-8859-5', 'WINDOWS-1251', - 'IBM855'], - alphabet=(u'АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЬЮЯ' - u'абвгдежзийклмнопрстуфхцчшщъьюя'), - wiki_start_pages=[u'Начална_страница']), - 'Czech': Language(name='Czech', - iso_code='cz', - use_ascii=True, - charsets=['ISO-8859-2', 'WINDOWS-1250'], - alphabet=u'áčďéěíňóřšťúůýžÁČĎÉĚÍŇÓŘŠŤÚŮÝŽ', - wiki_start_pages=[u'Hlavní_strana']), - 'Danish': Language(name='Danish', - iso_code='da', - use_ascii=True, - charsets=['ISO-8859-1', 'ISO-8859-15', - 'WINDOWS-1252'], - alphabet=u'æøåÆØÅ', - wiki_start_pages=[u'Forside']), - 'German': Language(name='German', - iso_code='de', - use_ascii=True, - charsets=['ISO-8859-1', 'WINDOWS-1252'], - alphabet=u'äöüßÄÖÜ', - wiki_start_pages=[u'Wikipedia:Hauptseite']), - 'Greek': Language(name='Greek', - iso_code='el', - use_ascii=False, - charsets=['ISO-8859-7', 'WINDOWS-1253'], - alphabet=(u'αβγδεζηθικλμνξοπρσςτυφχψωάέήίόύώ' - u'ΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΣΤΥΦΧΨΩΆΈΉΊΌΎΏ'), - wiki_start_pages=[u'Πύλη:Κύρια']), - 'English': Language(name='English', - iso_code='en', - use_ascii=True, - charsets=['ISO-8859-1', 'WINDOWS-1252'], - wiki_start_pages=[u'Main_Page']), - 'Esperanto': Language(name='Esperanto', - iso_code='eo', - # Q, W, X, and Y not used at all - use_ascii=False, - charsets=['ISO-8859-3'], - alphabet=(u'abcĉdefgĝhĥijĵklmnoprsŝtuŭvz' - u'ABCĈDEFGĜHĤIJĴKLMNOPRSŜTUŬVZ'), - wiki_start_pages=[u'Vikipedio:Ĉefpaĝo']), - 'Spanish': Language(name='Spanish', - iso_code='es', - use_ascii=True, - charsets=['ISO-8859-1', 'ISO-8859-15', - 'WINDOWS-1252'], - alphabet=u'ñáéíóúüÑÁÉÍÓÚÜ', - wiki_start_pages=[u'Wikipedia:Portada']), - 'Estonian': Language(name='Estonian', - iso_code='et', - use_ascii=False, - charsets=['ISO-8859-4', 'ISO-8859-13', - 'WINDOWS-1257'], - # C, F, Š, Q, W, X, Y, Z, Ž are only for - # loanwords - alphabet=(u'ABDEGHIJKLMNOPRSTUVÕÄÖÜ' - u'abdeghijklmnoprstuvõäöü'), - wiki_start_pages=[u'Esileht']), - 'Finnish': Language(name='Finnish', - iso_code='fi', - use_ascii=True, - charsets=['ISO-8859-1', 'ISO-8859-15', - 'WINDOWS-1252'], - alphabet=u'ÅÄÖŠŽåäöšž', - wiki_start_pages=[u'Wikipedia:Etusivu']), - 'French': Language(name='French', - iso_code='fr', - use_ascii=True, - charsets=['ISO-8859-1', 'ISO-8859-15', - 'WINDOWS-1252'], - alphabet=u'œàâçèéîïùûêŒÀÂÇÈÉÎÏÙÛÊ', - wiki_start_pages=[u'Wikipédia:Accueil_principal', - u'Bœuf (animal)']), - 'Hebrew': Language(name='Hebrew', - iso_code='he', - use_ascii=False, - charsets=['ISO-8859-8', 'WINDOWS-1255'], - alphabet=u'אבגדהוזחטיךכלםמןנסעףפץצקרשתװױײ', - wiki_start_pages=[u'עמוד_ראשי']), - 'Croatian': Language(name='Croatian', - iso_code='hr', - # Q, W, X, Y are only used for foreign words. - use_ascii=False, - charsets=['ISO-8859-2', 'WINDOWS-1250'], - alphabet=(u'abcčćdđefghijklmnoprsštuvzž' - u'ABCČĆDĐEFGHIJKLMNOPRSŠTUVZŽ'), - wiki_start_pages=[u'Glavna_stranica']), - 'Hungarian': Language(name='Hungarian', - iso_code='hu', - # Q, W, X, Y are only used for foreign words. 
- use_ascii=False, - charsets=['ISO-8859-2', 'WINDOWS-1250'], - alphabet=(u'abcdefghijklmnoprstuvzáéíóöőúüű' - u'ABCDEFGHIJKLMNOPRSTUVZÁÉÍÓÖŐÚÜŰ'), - wiki_start_pages=[u'Kezdőlap']), - 'Italian': Language(name='Italian', - iso_code='it', - use_ascii=True, - charsets=['ISO-8859-1', 'ISO-8859-15', - 'WINDOWS-1252'], - alphabet=u'ÀÈÉÌÒÓÙàèéìòóù', - wiki_start_pages=[u'Pagina_principale']), - 'Lithuanian': Language(name='Lithuanian', - iso_code='lt', - use_ascii=False, - charsets=['ISO-8859-13', 'WINDOWS-1257', - 'ISO-8859-4'], - # Q, W, and X not used at all - alphabet=(u'AĄBCČDEĘĖFGHIĮYJKLMNOPRSŠTUŲŪVZŽ' - u'aąbcčdeęėfghiįyjklmnoprsštuųūvzž'), - wiki_start_pages=[u'Pagrindinis_puslapis']), - 'Latvian': Language(name='Latvian', - iso_code='lv', - use_ascii=False, - charsets=['ISO-8859-13', 'WINDOWS-1257', - 'ISO-8859-4'], - # Q, W, X, Y are only for loanwords - alphabet=(u'AĀBCČDEĒFGĢHIĪJKĶLĻMNŅOPRSŠTUŪVZŽ' - u'aābcčdeēfgģhiījkķlļmnņoprsštuūvzž'), - wiki_start_pages=[u'Sākumlapa']), - 'Macedonian': Language(name='Macedonian', - iso_code='mk', - use_ascii=False, - charsets=['ISO-8859-5', 'WINDOWS-1251', - 'MacCyrillic', 'IBM855'], - alphabet=(u'АБВГДЃЕЖЗЅИЈКЛЉМНЊОПРСТЌУФХЦЧЏШ' - u'абвгдѓежзѕијклљмнњопрстќуфхцчџш'), - wiki_start_pages=[u'Главна_страница']), - 'Dutch': Language(name='Dutch', - iso_code='nl', - use_ascii=True, - charsets=['ISO-8859-1', 'WINDOWS-1252'], - wiki_start_pages=[u'Hoofdpagina']), - 'Polish': Language(name='Polish', - iso_code='pl', - # Q and X are only used for foreign words. - use_ascii=False, - charsets=['ISO-8859-2', 'WINDOWS-1250'], - alphabet=(u'AĄBCĆDEĘFGHIJKLŁMNŃOÓPRSŚTUWYZŹŻ' - u'aąbcćdeęfghijklłmnńoóprsśtuwyzźż'), - wiki_start_pages=[u'Wikipedia:Strona_główna']), - 'Portuguese': Language(name='Portuguese', - iso_code='pt', - use_ascii=True, - charsets=['ISO-8859-1', 'ISO-8859-15', - 'WINDOWS-1252'], - alphabet=u'ÁÂÃÀÇÉÊÍÓÔÕÚáâãàçéêíóôõú', - wiki_start_pages=[u'Wikipédia:Página_principal']), - 'Romanian': Language(name='Romanian', - iso_code='ro', - use_ascii=True, - charsets=['ISO-8859-2', 'WINDOWS-1250'], - alphabet=u'ăâîșțĂÂÎȘȚ', - wiki_start_pages=[u'Pagina_principală']), - 'Russian': Language(name='Russian', - iso_code='ru', - use_ascii=False, - charsets=['ISO-8859-5', 'WINDOWS-1251', - 'KOI8-R', 'MacCyrillic', 'IBM866', - 'IBM855'], - alphabet=(u'абвгдеёжзийклмнопрстуфхцчшщъыьэюя' - u'АБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ'), - wiki_start_pages=[u'Заглавная_страница']), - 'Slovak': Language(name='Slovak', - iso_code='sk', - use_ascii=True, - charsets=['ISO-8859-2', 'WINDOWS-1250'], - alphabet=u'áäčďéíĺľňóôŕšťúýžÁÄČĎÉÍĹĽŇÓÔŔŠŤÚÝŽ', - wiki_start_pages=[u'Hlavná_stránka']), - 'Slovene': Language(name='Slovene', - iso_code='sl', - # Q, W, X, Y are only used for foreign words. - use_ascii=False, - charsets=['ISO-8859-2', 'WINDOWS-1250'], - alphabet=(u'abcčdefghijklmnoprsštuvzž' - u'ABCČDEFGHIJKLMNOPRSŠTUVZŽ'), - wiki_start_pages=[u'Glavna_stran']), - # Serbian can be written in both Latin and Cyrillic, but there's no - # simple way to get the Latin alphabet pages from Wikipedia through - # the API, so for now we just support Cyrillic. 
- 'Serbian': Language(name='Serbian', - iso_code='sr', - alphabet=(u'АБВГДЂЕЖЗИЈКЛЉМНЊОПРСТЋУФХЦЧЏШ' - u'абвгдђежзијклљмнњопрстћуфхцчџш'), - charsets=['ISO-8859-5', 'WINDOWS-1251', - 'MacCyrillic', 'IBM855'], - wiki_start_pages=[u'Главна_страна']), - 'Thai': Language(name='Thai', - iso_code='th', - use_ascii=False, - charsets=['ISO-8859-11', 'TIS-620', 'CP874'], - alphabet=u'กขฃคฅฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลฦวศษสหฬอฮฯะัาำิีึืฺุู฿เแโใไๅๆ็่้๊๋์ํ๎๏๐๑๒๓๔๕๖๗๘๙๚๛', - wiki_start_pages=[u'หน้าหลัก']), - 'Turkish': Language(name='Turkish', - iso_code='tr', - # Q, W, and X are not used by Turkish - use_ascii=False, - charsets=['ISO-8859-3', 'ISO-8859-9', - 'WINDOWS-1254'], - alphabet=(u'abcçdefgğhıijklmnoöprsştuüvyzâîû' - u'ABCÇDEFGĞHIİJKLMNOÖPRSŞTUÜVYZÂÎÛ'), - wiki_start_pages=[u'Ana_Sayfa']), - 'Vietnamese': Language(name='Vietnamese', - iso_code='vi', - use_ascii=False, - # Windows-1258 is the only common 8-bit - # Vietnamese encoding supported by Python. - # From Wikipedia: - # For systems that lack support for Unicode, - # dozens of 8-bit Vietnamese code pages are - # available.[1] The most common are VISCII - # (TCVN 5712:1993), VPS, and Windows-1258.[3] - # Where ASCII is required, such as when - # ensuring readability in plain text e-mail, - # Vietnamese letters are often encoded - # according to Vietnamese Quoted-Readable - # (VIQR) or VSCII Mnemonic (VSCII-MNEM),[4] - # though usage of either variable-width - # scheme has declined dramatically following - # the adoption of Unicode on the World Wide - # Web. - charsets=['WINDOWS-1258'], - alphabet=(u'aăâbcdđeêghiklmnoôơpqrstuưvxy' - u'AĂÂBCDĐEÊGHIKLMNOÔƠPQRSTUƯVXY'), - wiki_start_pages=[u'Chữ_Quốc_ngữ']), - } +LANGUAGES = { + "Arabic": Language( + name="Arabic", + iso_code="ar", + use_ascii=False, + # We only support encodings that use isolated + # forms, because the current recommendation is + # that the rendering system handles presentation + # forms. This means we purposefully skip IBM864. 
+ charsets=["ISO-8859-6", "WINDOWS-1256", "CP720", "CP864"], + alphabet="ءآأؤإئابةتثجحخدذرزسشصضطظعغػؼؽؾؿـفقكلمنهوىيًٌٍَُِّ", + wiki_start_pages=["الصفحة_الرئيسية"], + ), + "Belarusian": Language( + name="Belarusian", + iso_code="be", + use_ascii=False, + charsets=["ISO-8859-5", "WINDOWS-1251", "IBM866", "MacCyrillic"], + alphabet="АБВГДЕЁЖЗІЙКЛМНОПРСТУЎФХЦЧШЫЬЭЮЯабвгдеёжзійклмнопрстуўфхцчшыьэюяʼ", + wiki_start_pages=["Галоўная_старонка"], + ), + "Bulgarian": Language( + name="Bulgarian", + iso_code="bg", + use_ascii=False, + charsets=["ISO-8859-5", "WINDOWS-1251", "IBM855"], + alphabet="АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЬЮЯабвгдежзийклмнопрстуфхцчшщъьюя", + wiki_start_pages=["Начална_страница"], + ), + "Czech": Language( + name="Czech", + iso_code="cz", + use_ascii=True, + charsets=["ISO-8859-2", "WINDOWS-1250"], + alphabet="áčďéěíňóřšťúůýžÁČĎÉĚÍŇÓŘŠŤÚŮÝŽ", + wiki_start_pages=["Hlavní_strana"], + ), + "Danish": Language( + name="Danish", + iso_code="da", + use_ascii=True, + charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252", "MacRoman"], + alphabet="æøåÆØÅ", + wiki_start_pages=["Forside"], + ), + "German": Language( + name="German", + iso_code="de", + use_ascii=True, + charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252", "MacRoman"], + alphabet="äöüßẞÄÖÜ", + wiki_start_pages=["Wikipedia:Hauptseite"], + ), + "Greek": Language( + name="Greek", + iso_code="el", + use_ascii=False, + charsets=["ISO-8859-7", "WINDOWS-1253"], + alphabet="αβγδεζηθικλμνξοπρσςτυφχψωάέήίόύώΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΣΤΥΦΧΨΩΆΈΉΊΌΎΏ", + wiki_start_pages=["Πύλη:Κύρια"], + ), + "English": Language( + name="English", + iso_code="en", + use_ascii=True, + charsets=["ISO-8859-1", "WINDOWS-1252", "MacRoman"], + wiki_start_pages=["Main_Page"], + ), + "Esperanto": Language( + name="Esperanto", + iso_code="eo", + # Q, W, X, and Y not used at all + use_ascii=False, + charsets=["ISO-8859-3"], + alphabet="abcĉdefgĝhĥijĵklmnoprsŝtuŭvzABCĈDEFGĜHĤIJĴKLMNOPRSŜTUŬVZ", + wiki_start_pages=["Vikipedio:Ĉefpaĝo"], + ), + "Spanish": Language( + name="Spanish", + iso_code="es", + use_ascii=True, + charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252", "MacRoman"], + alphabet="ñáéíóúüÑÁÉÍÓÚÜ", + wiki_start_pages=["Wikipedia:Portada"], + ), + "Estonian": Language( + name="Estonian", + iso_code="et", + use_ascii=False, + charsets=["ISO-8859-4", "ISO-8859-13", "WINDOWS-1257"], + # C, F, Š, Q, W, X, Y, Z, Ž are only for + # loanwords + alphabet="ABDEGHIJKLMNOPRSTUVÕÄÖÜabdeghijklmnoprstuvõäöü", + wiki_start_pages=["Esileht"], + ), + "Finnish": Language( + name="Finnish", + iso_code="fi", + use_ascii=True, + charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252", "MacRoman"], + alphabet="ÅÄÖŠŽåäöšž", + wiki_start_pages=["Wikipedia:Etusivu"], + ), + "French": Language( + name="French", + iso_code="fr", + use_ascii=True, + charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252", "MacRoman"], + alphabet="œàâçèéîïùûêŒÀÂÇÈÉÎÏÙÛÊ", + wiki_start_pages=["Wikipédia:Accueil_principal", "Bœuf (animal)"], + ), + "Hebrew": Language( + name="Hebrew", + iso_code="he", + use_ascii=False, + charsets=["ISO-8859-8", "WINDOWS-1255"], + alphabet="אבגדהוזחטיךכלםמןנסעףפץצקרשתװױײ", + wiki_start_pages=["עמוד_ראשי"], + ), + "Croatian": Language( + name="Croatian", + iso_code="hr", + # Q, W, X, Y are only used for foreign words. 
+ use_ascii=False, + charsets=["ISO-8859-2", "WINDOWS-1250"], + alphabet="abcčćdđefghijklmnoprsštuvzžABCČĆDĐEFGHIJKLMNOPRSŠTUVZŽ", + wiki_start_pages=["Glavna_stranica"], + ), + "Hungarian": Language( + name="Hungarian", + iso_code="hu", + # Q, W, X, Y are only used for foreign words. + use_ascii=False, + charsets=["ISO-8859-2", "WINDOWS-1250"], + alphabet="abcdefghijklmnoprstuvzáéíóöőúüűABCDEFGHIJKLMNOPRSTUVZÁÉÍÓÖŐÚÜŰ", + wiki_start_pages=["Kezdőlap"], + ), + "Italian": Language( + name="Italian", + iso_code="it", + use_ascii=True, + charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252", "MacRoman"], + alphabet="ÀÈÉÌÒÓÙàèéìòóù", + wiki_start_pages=["Pagina_principale"], + ), + "Lithuanian": Language( + name="Lithuanian", + iso_code="lt", + use_ascii=False, + charsets=["ISO-8859-13", "WINDOWS-1257", "ISO-8859-4"], + # Q, W, and X not used at all + alphabet="AĄBCČDEĘĖFGHIĮYJKLMNOPRSŠTUŲŪVZŽaąbcčdeęėfghiįyjklmnoprsštuųūvzž", + wiki_start_pages=["Pagrindinis_puslapis"], + ), + "Latvian": Language( + name="Latvian", + iso_code="lv", + use_ascii=False, + charsets=["ISO-8859-13", "WINDOWS-1257", "ISO-8859-4"], + # Q, W, X, Y are only for loanwords + alphabet="AĀBCČDEĒFGĢHIĪJKĶLĻMNŅOPRSŠTUŪVZŽaābcčdeēfgģhiījkķlļmnņoprsštuūvzž", + wiki_start_pages=["Sākumlapa"], + ), + "Macedonian": Language( + name="Macedonian", + iso_code="mk", + use_ascii=False, + charsets=["ISO-8859-5", "WINDOWS-1251", "MacCyrillic", "IBM855"], + alphabet="АБВГДЃЕЖЗЅИЈКЛЉМНЊОПРСТЌУФХЦЧЏШабвгдѓежзѕијклљмнњопрстќуфхцчџш", + wiki_start_pages=["Главна_страница"], + ), + "Dutch": Language( + name="Dutch", + iso_code="nl", + use_ascii=True, + charsets=["ISO-8859-1", "WINDOWS-1252", "MacRoman"], + wiki_start_pages=["Hoofdpagina"], + ), + "Polish": Language( + name="Polish", + iso_code="pl", + # Q and X are only used for foreign words. + use_ascii=False, + charsets=["ISO-8859-2", "WINDOWS-1250"], + alphabet="AĄBCĆDEĘFGHIJKLŁMNŃOÓPRSŚTUWYZŹŻaąbcćdeęfghijklłmnńoóprsśtuwyzźż", + wiki_start_pages=["Wikipedia:Strona_główna"], + ), + "Portuguese": Language( + name="Portuguese", + iso_code="pt", + use_ascii=True, + charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252", "MacRoman"], + alphabet="ÁÂÃÀÇÉÊÍÓÔÕÚáâãàçéêíóôõú", + wiki_start_pages=["Wikipédia:Página_principal"], + ), + "Romanian": Language( + name="Romanian", + iso_code="ro", + use_ascii=True, + charsets=["ISO-8859-2", "WINDOWS-1250"], + alphabet="ăâîșțĂÂÎȘȚ", + wiki_start_pages=["Pagina_principală"], + ), + "Russian": Language( + name="Russian", + iso_code="ru", + use_ascii=False, + charsets=[ + "ISO-8859-5", + "WINDOWS-1251", + "KOI8-R", + "MacCyrillic", + "IBM866", + "IBM855", + ], + alphabet="абвгдеёжзийклмнопрстуфхцчшщъыьэюяАБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ", + wiki_start_pages=["Заглавная_страница"], + ), + "Slovak": Language( + name="Slovak", + iso_code="sk", + use_ascii=True, + charsets=["ISO-8859-2", "WINDOWS-1250"], + alphabet="áäčďéíĺľňóôŕšťúýžÁÄČĎÉÍĹĽŇÓÔŔŠŤÚÝŽ", + wiki_start_pages=["Hlavná_stránka"], + ), + "Slovene": Language( + name="Slovene", + iso_code="sl", + # Q, W, X, Y are only used for foreign words. + use_ascii=False, + charsets=["ISO-8859-2", "WINDOWS-1250"], + alphabet="abcčdefghijklmnoprsštuvzžABCČDEFGHIJKLMNOPRSŠTUVZŽ", + wiki_start_pages=["Glavna_stran"], + ), + # Serbian can be written in both Latin and Cyrillic, but there's no + # simple way to get the Latin alphabet pages from Wikipedia through + # the API, so for now we just support Cyrillic. 
+ "Serbian": Language( + name="Serbian", + iso_code="sr", + alphabet="АБВГДЂЕЖЗИЈКЛЉМНЊОПРСТЋУФХЦЧЏШабвгдђежзијклљмнњопрстћуфхцчџш", + charsets=["ISO-8859-5", "WINDOWS-1251", "MacCyrillic", "IBM855"], + wiki_start_pages=["Главна_страна"], + ), + "Thai": Language( + name="Thai", + iso_code="th", + use_ascii=False, + charsets=["ISO-8859-11", "TIS-620", "CP874"], + alphabet="กขฃคฅฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลฦวศษสหฬอฮฯะัาำิีึืฺุู฿เแโใไๅๆ็่้๊๋์ํ๎๏๐๑๒๓๔๕๖๗๘๙๚๛", + wiki_start_pages=["หน้าหลัก"], + ), + "Turkish": Language( + name="Turkish", + iso_code="tr", + # Q, W, and X are not used by Turkish + use_ascii=False, + charsets=["ISO-8859-3", "ISO-8859-9", "WINDOWS-1254"], + alphabet="abcçdefgğhıijklmnoöprsştuüvyzâîûABCÇDEFGĞHIİJKLMNOÖPRSŞTUÜVYZÂÎÛ", + wiki_start_pages=["Ana_Sayfa"], + ), + "Vietnamese": Language( + name="Vietnamese", + iso_code="vi", + use_ascii=False, + # Windows-1258 is the only common 8-bit + # Vietnamese encoding supported by Python. + # From Wikipedia: + # For systems that lack support for Unicode, + # dozens of 8-bit Vietnamese code pages are + # available.[1] The most common are VISCII + # (TCVN 5712:1993), VPS, and Windows-1258.[3] + # Where ASCII is required, such as when + # ensuring readability in plain text e-mail, + # Vietnamese letters are often encoded + # according to Vietnamese Quoted-Readable + # (VIQR) or VSCII Mnemonic (VSCII-MNEM),[4] + # though usage of either variable-width + # scheme has declined dramatically following + # the adoption of Unicode on the World Wide + # Web. + charsets=["WINDOWS-1258"], + alphabet="aăâbcdđeêghiklmnoôơpqrstuưvxyAĂÂBCDĐEÊGHIKLMNOÔƠPQRSTUƯVXY", + wiki_start_pages=["Chữ_Quốc_ngữ"], + ), +} diff --git a/env/Lib/site-packages/pip/_vendor/chardet/sbcharsetprober.py b/env/Lib/site-packages/pip/_vendor/chardet/sbcharsetprober.py index 46ba835c66c9f4c3b15b0a0671447d33b3b240d1..0ffbcdd2c3e21b68566c88a3f05239447489df84 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/sbcharsetprober.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/sbcharsetprober.py @@ -26,70 +26,77 @@ # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -from collections import namedtuple +from typing import Dict, List, NamedTuple, Optional, Union from .charsetprober import CharSetProber from .enums import CharacterCategory, ProbingState, SequenceLikelihood -SingleByteCharSetModel = namedtuple('SingleByteCharSetModel', - ['charset_name', - 'language', - 'char_to_order_map', - 'language_model', - 'typical_positive_ratio', - 'keep_ascii_letters', - 'alphabet']) +class SingleByteCharSetModel(NamedTuple): + charset_name: str + language: str + char_to_order_map: Dict[int, int] + language_model: Dict[int, Dict[int, int]] + typical_positive_ratio: float + keep_ascii_letters: bool + alphabet: str class SingleByteCharSetProber(CharSetProber): SAMPLE_SIZE = 64 - SB_ENOUGH_REL_THRESHOLD = 1024 # 0.25 * SAMPLE_SIZE^2 + SB_ENOUGH_REL_THRESHOLD = 1024 # 0.25 * SAMPLE_SIZE^2 POSITIVE_SHORTCUT_THRESHOLD = 0.95 NEGATIVE_SHORTCUT_THRESHOLD = 0.05 - def __init__(self, model, reversed=False, name_prober=None): - super(SingleByteCharSetProber, self).__init__() + def __init__( + self, + model: SingleByteCharSetModel, + is_reversed: bool = False, + name_prober: Optional[CharSetProber] = None, + ) -> None: + super().__init__() self._model = model # TRUE if we need to reverse every pair in the model lookup - self._reversed = reversed + self._reversed = is_reversed # Optional auxiliary prober for name decision self._name_prober = name_prober - 
self._last_order = None - self._seq_counters = None - self._total_seqs = None - self._total_char = None - self._freq_char = None + self._last_order = 255 + self._seq_counters: List[int] = [] + self._total_seqs = 0 + self._total_char = 0 + self._control_char = 0 + self._freq_char = 0 self.reset() - def reset(self): - super(SingleByteCharSetProber, self).reset() + def reset(self) -> None: + super().reset() # char order of last character self._last_order = 255 self._seq_counters = [0] * SequenceLikelihood.get_num_categories() self._total_seqs = 0 self._total_char = 0 + self._control_char = 0 # characters that fall in our sampling range self._freq_char = 0 @property - def charset_name(self): + def charset_name(self) -> Optional[str]: if self._name_prober: return self._name_prober.charset_name - else: - return self._model.charset_name + return self._model.charset_name @property - def language(self): + def language(self) -> Optional[str]: if self._name_prober: return self._name_prober.language - else: - return self._model.language + return self._model.language - def feed(self, byte_str): + def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState: # TODO: Make filter_international_words keep things in self.alphabet if not self._model.keep_ascii_letters: byte_str = self.filter_international_words(byte_str) + else: + byte_str = self.remove_xml_tags(byte_str) if not byte_str: return self.state char_to_order_map = self._model.char_to_order_map @@ -103,9 +110,6 @@ class SingleByteCharSetProber(CharSetProber): # _total_char purposes. if order < CharacterCategory.CONTROL: self._total_char += 1 - # TODO: Follow uchardet's lead and discount confidence for frequent - # control characters. - # See https://github.com/BYVoid/uchardet/commit/55b4f23971db61 if order < self.SAMPLE_SIZE: self._freq_char += 1 if self._last_order < self.SAMPLE_SIZE: @@ -122,23 +126,36 @@ class SingleByteCharSetProber(CharSetProber): if self._total_seqs > self.SB_ENOUGH_REL_THRESHOLD: confidence = self.get_confidence() if confidence > self.POSITIVE_SHORTCUT_THRESHOLD: - self.logger.debug('%s confidence = %s, we have a winner', - charset_name, confidence) + self.logger.debug( + "%s confidence = %s, we have a winner", charset_name, confidence + ) self._state = ProbingState.FOUND_IT elif confidence < self.NEGATIVE_SHORTCUT_THRESHOLD: - self.logger.debug('%s confidence = %s, below negative ' - 'shortcut threshhold %s', charset_name, - confidence, - self.NEGATIVE_SHORTCUT_THRESHOLD) + self.logger.debug( + "%s confidence = %s, below negative shortcut threshold %s", + charset_name, + confidence, + self.NEGATIVE_SHORTCUT_THRESHOLD, + ) self._state = ProbingState.NOT_ME return self.state - def get_confidence(self): + def get_confidence(self) -> float: r = 0.01 if self._total_seqs > 0: - r = ((1.0 * self._seq_counters[SequenceLikelihood.POSITIVE]) / - self._total_seqs / self._model.typical_positive_ratio) + r = ( + ( + self._seq_counters[SequenceLikelihood.POSITIVE] + + 0.25 * self._seq_counters[SequenceLikelihood.LIKELY] + ) + / self._total_seqs + / self._model.typical_positive_ratio + ) + # The more control characters (proportionally to the size + # of the text), the less confident we become in the current + # charset. 
+ r = r * (self._total_char - self._control_char) / self._total_char r = r * self._freq_char / self._total_char if r >= 1.0: r = 0.99 diff --git a/env/Lib/site-packages/pip/_vendor/chardet/sbcsgroupprober.py b/env/Lib/site-packages/pip/_vendor/chardet/sbcsgroupprober.py index bdeef4e15b0dc5a68220b14c9dcec1a019401106..890ae8465c5b0ad2a5f99464fe5f5c0be49809f1 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/sbcsgroupprober.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/sbcsgroupprober.py @@ -28,33 +28,38 @@ from .charsetgroupprober import CharSetGroupProber from .hebrewprober import HebrewProber -from .langbulgarianmodel import (ISO_8859_5_BULGARIAN_MODEL, - WINDOWS_1251_BULGARIAN_MODEL) +from .langbulgarianmodel import ISO_8859_5_BULGARIAN_MODEL, WINDOWS_1251_BULGARIAN_MODEL from .langgreekmodel import ISO_8859_7_GREEK_MODEL, WINDOWS_1253_GREEK_MODEL from .langhebrewmodel import WINDOWS_1255_HEBREW_MODEL + # from .langhungarianmodel import (ISO_8859_2_HUNGARIAN_MODEL, # WINDOWS_1250_HUNGARIAN_MODEL) -from .langrussianmodel import (IBM855_RUSSIAN_MODEL, IBM866_RUSSIAN_MODEL, - ISO_8859_5_RUSSIAN_MODEL, KOI8_R_RUSSIAN_MODEL, - MACCYRILLIC_RUSSIAN_MODEL, - WINDOWS_1251_RUSSIAN_MODEL) +from .langrussianmodel import ( + IBM855_RUSSIAN_MODEL, + IBM866_RUSSIAN_MODEL, + ISO_8859_5_RUSSIAN_MODEL, + KOI8_R_RUSSIAN_MODEL, + MACCYRILLIC_RUSSIAN_MODEL, + WINDOWS_1251_RUSSIAN_MODEL, +) from .langthaimodel import TIS_620_THAI_MODEL from .langturkishmodel import ISO_8859_9_TURKISH_MODEL from .sbcharsetprober import SingleByteCharSetProber class SBCSGroupProber(CharSetGroupProber): - def __init__(self): - super(SBCSGroupProber, self).__init__() + def __init__(self) -> None: + super().__init__() hebrew_prober = HebrewProber() - logical_hebrew_prober = SingleByteCharSetProber(WINDOWS_1255_HEBREW_MODEL, - False, hebrew_prober) + logical_hebrew_prober = SingleByteCharSetProber( + WINDOWS_1255_HEBREW_MODEL, is_reversed=False, name_prober=hebrew_prober + ) # TODO: See if using ISO-8859-8 Hebrew model works better here, since # it's actually the visual one - visual_hebrew_prober = SingleByteCharSetProber(WINDOWS_1255_HEBREW_MODEL, - True, hebrew_prober) - hebrew_prober.set_model_probers(logical_hebrew_prober, - visual_hebrew_prober) + visual_hebrew_prober = SingleByteCharSetProber( + WINDOWS_1255_HEBREW_MODEL, is_reversed=True, name_prober=hebrew_prober + ) + hebrew_prober.set_model_probers(logical_hebrew_prober, visual_hebrew_prober) # TODO: ORDER MATTERS HERE. I changed the order vs what was in master # and several tests failed that did not before. 
Some thought # should be put into the ordering, and we should consider making diff --git a/env/Lib/site-packages/pip/_vendor/chardet/sjisprober.py b/env/Lib/site-packages/pip/_vendor/chardet/sjisprober.py index 9e29623bdc54a7c6d11bcc167d71bb44cc9be39d..91df077961b6310b8e1c708b74003d5343bff6a8 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/sjisprober.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/sjisprober.py @@ -25,68 +25,81 @@ # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -from .mbcharsetprober import MultiByteCharSetProber -from .codingstatemachine import CodingStateMachine +from typing import Union + from .chardistribution import SJISDistributionAnalysis +from .codingstatemachine import CodingStateMachine +from .enums import MachineState, ProbingState from .jpcntx import SJISContextAnalysis +from .mbcharsetprober import MultiByteCharSetProber from .mbcssm import SJIS_SM_MODEL -from .enums import ProbingState, MachineState class SJISProber(MultiByteCharSetProber): - def __init__(self): - super(SJISProber, self).__init__() + def __init__(self) -> None: + super().__init__() self.coding_sm = CodingStateMachine(SJIS_SM_MODEL) self.distribution_analyzer = SJISDistributionAnalysis() self.context_analyzer = SJISContextAnalysis() self.reset() - def reset(self): - super(SJISProber, self).reset() + def reset(self) -> None: + super().reset() self.context_analyzer.reset() @property - def charset_name(self): + def charset_name(self) -> str: return self.context_analyzer.charset_name @property - def language(self): + def language(self) -> str: return "Japanese" - def feed(self, byte_str): - for i in range(len(byte_str)): - coding_state = self.coding_sm.next_state(byte_str[i]) + def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState: + assert self.coding_sm is not None + assert self.distribution_analyzer is not None + + for i, byte in enumerate(byte_str): + coding_state = self.coding_sm.next_state(byte) if coding_state == MachineState.ERROR: - self.logger.debug('%s %s prober hit error at byte %s', - self.charset_name, self.language, i) + self.logger.debug( + "%s %s prober hit error at byte %s", + self.charset_name, + self.language, + i, + ) self._state = ProbingState.NOT_ME break - elif coding_state == MachineState.ITS_ME: + if coding_state == MachineState.ITS_ME: self._state = ProbingState.FOUND_IT break - elif coding_state == MachineState.START: + if coding_state == MachineState.START: char_len = self.coding_sm.get_current_charlen() if i == 0: - self._last_char[1] = byte_str[0] - self.context_analyzer.feed(self._last_char[2 - char_len:], - char_len) + self._last_char[1] = byte + self.context_analyzer.feed( + self._last_char[2 - char_len :], char_len + ) self.distribution_analyzer.feed(self._last_char, char_len) else: - self.context_analyzer.feed(byte_str[i + 1 - char_len:i + 3 - - char_len], char_len) - self.distribution_analyzer.feed(byte_str[i - 1:i + 1], - char_len) + self.context_analyzer.feed( + byte_str[i + 1 - char_len : i + 3 - char_len], char_len + ) + self.distribution_analyzer.feed(byte_str[i - 1 : i + 1], char_len) self._last_char[0] = byte_str[-1] if self.state == ProbingState.DETECTING: - if (self.context_analyzer.got_enough_data() and - (self.get_confidence() > self.SHORTCUT_THRESHOLD)): + if self.context_analyzer.got_enough_data() and ( + self.get_confidence() > self.SHORTCUT_THRESHOLD + ): self._state = ProbingState.FOUND_IT return self.state - def get_confidence(self): + def get_confidence(self) -> float: + assert 
self.distribution_analyzer is not None + context_conf = self.context_analyzer.get_confidence() distrib_conf = self.distribution_analyzer.get_confidence() return max(context_conf, distrib_conf) diff --git a/env/Lib/site-packages/pip/_vendor/chardet/universaldetector.py b/env/Lib/site-packages/pip/_vendor/chardet/universaldetector.py index 055a8ac1b1d21ed8d6d8c9a1217e79df9d1396c1..30c441dc28ee327076a850b1d3c88a9a2c8f04f0 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/universaldetector.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/universaldetector.py @@ -39,16 +39,21 @@ class a user of ``chardet`` should use. import codecs import logging import re +from typing import List, Optional, Union from .charsetgroupprober import CharSetGroupProber +from .charsetprober import CharSetProber from .enums import InputState, LanguageFilter, ProbingState from .escprober import EscCharSetProber from .latin1prober import Latin1Prober +from .macromanprober import MacRomanProber from .mbcsgroupprober import MBCSGroupProber +from .resultdict import ResultDict from .sbcsgroupprober import SBCSGroupProber +from .utf1632prober import UTF1632Prober -class UniversalDetector(object): +class UniversalDetector: """ The ``UniversalDetector`` class underlies the ``chardet.detect`` function and coordinates all of the different charset probers. @@ -66,49 +71,87 @@ class UniversalDetector(object): """ MINIMUM_THRESHOLD = 0.20 - HIGH_BYTE_DETECTOR = re.compile(b'[\x80-\xFF]') - ESC_DETECTOR = re.compile(b'(\033|~{)') - WIN_BYTE_DETECTOR = re.compile(b'[\x80-\x9F]') - ISO_WIN_MAP = {'iso-8859-1': 'Windows-1252', - 'iso-8859-2': 'Windows-1250', - 'iso-8859-5': 'Windows-1251', - 'iso-8859-6': 'Windows-1256', - 'iso-8859-7': 'Windows-1253', - 'iso-8859-8': 'Windows-1255', - 'iso-8859-9': 'Windows-1254', - 'iso-8859-13': 'Windows-1257'} - - def __init__(self, lang_filter=LanguageFilter.ALL): - self._esc_charset_prober = None - self._charset_probers = [] - self.result = None - self.done = None - self._got_data = None - self._input_state = None - self._last_char = None + HIGH_BYTE_DETECTOR = re.compile(b"[\x80-\xFF]") + ESC_DETECTOR = re.compile(b"(\033|~{)") + WIN_BYTE_DETECTOR = re.compile(b"[\x80-\x9F]") + ISO_WIN_MAP = { + "iso-8859-1": "Windows-1252", + "iso-8859-2": "Windows-1250", + "iso-8859-5": "Windows-1251", + "iso-8859-6": "Windows-1256", + "iso-8859-7": "Windows-1253", + "iso-8859-8": "Windows-1255", + "iso-8859-9": "Windows-1254", + "iso-8859-13": "Windows-1257", + } + # Based on https://encoding.spec.whatwg.org/#names-and-labels + # but altered to match Python names for encodings and remove mappings + # that break tests. 
+ LEGACY_MAP = { + "ascii": "Windows-1252", + "iso-8859-1": "Windows-1252", + "tis-620": "ISO-8859-11", + "iso-8859-9": "Windows-1254", + "gb2312": "GB18030", + "euc-kr": "CP949", + "utf-16le": "UTF-16", + } + + def __init__( + self, + lang_filter: LanguageFilter = LanguageFilter.ALL, + should_rename_legacy: bool = False, + ) -> None: + self._esc_charset_prober: Optional[EscCharSetProber] = None + self._utf1632_prober: Optional[UTF1632Prober] = None + self._charset_probers: List[CharSetProber] = [] + self.result: ResultDict = { + "encoding": None, + "confidence": 0.0, + "language": None, + } + self.done = False + self._got_data = False + self._input_state = InputState.PURE_ASCII + self._last_char = b"" self.lang_filter = lang_filter self.logger = logging.getLogger(__name__) - self._has_win_bytes = None + self._has_win_bytes = False + self.should_rename_legacy = should_rename_legacy self.reset() - def reset(self): + @property + def input_state(self) -> int: + return self._input_state + + @property + def has_win_bytes(self) -> bool: + return self._has_win_bytes + + @property + def charset_probers(self) -> List[CharSetProber]: + return self._charset_probers + + def reset(self) -> None: """ Reset the UniversalDetector and all of its probers back to their initial states. This is called by ``__init__``, so you only need to call this directly in between analyses of different documents. """ - self.result = {'encoding': None, 'confidence': 0.0, 'language': None} + self.result = {"encoding": None, "confidence": 0.0, "language": None} self.done = False self._got_data = False self._has_win_bytes = False self._input_state = InputState.PURE_ASCII - self._last_char = b'' + self._last_char = b"" if self._esc_charset_prober: self._esc_charset_prober.reset() + if self._utf1632_prober: + self._utf1632_prober.reset() for prober in self._charset_probers: prober.reset() - def feed(self, byte_str): + def feed(self, byte_str: Union[bytes, bytearray]) -> None: """ Takes a chunk of a document and feeds it through all of the relevant charset probers. @@ -125,7 +168,7 @@ class UniversalDetector(object): if self.done: return - if not len(byte_str): + if not byte_str: return if not isinstance(byte_str, bytearray): @@ -136,35 +179,38 @@ class UniversalDetector(object): # If the data starts with BOM, we know it is UTF if byte_str.startswith(codecs.BOM_UTF8): # EF BB BF UTF-8 with BOM - self.result = {'encoding': "UTF-8-SIG", - 'confidence': 1.0, - 'language': ''} - elif byte_str.startswith((codecs.BOM_UTF32_LE, - codecs.BOM_UTF32_BE)): + self.result = { + "encoding": "UTF-8-SIG", + "confidence": 1.0, + "language": "", + } + elif byte_str.startswith((codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)): # FF FE 00 00 UTF-32, little-endian BOM # 00 00 FE FF UTF-32, big-endian BOM - self.result = {'encoding': "UTF-32", - 'confidence': 1.0, - 'language': ''} - elif byte_str.startswith(b'\xFE\xFF\x00\x00'): + self.result = {"encoding": "UTF-32", "confidence": 1.0, "language": ""} + elif byte_str.startswith(b"\xFE\xFF\x00\x00"): # FE FF 00 00 UCS-4, unusual octet order BOM (3412) - self.result = {'encoding': "X-ISO-10646-UCS-4-3412", - 'confidence': 1.0, - 'language': ''} - elif byte_str.startswith(b'\x00\x00\xFF\xFE'): + self.result = { + # TODO: This encoding is not supported by Python. Should remove? 
+ "encoding": "X-ISO-10646-UCS-4-3412", + "confidence": 1.0, + "language": "", + } + elif byte_str.startswith(b"\x00\x00\xFF\xFE"): # 00 00 FF FE UCS-4, unusual octet order BOM (2143) - self.result = {'encoding': "X-ISO-10646-UCS-4-2143", - 'confidence': 1.0, - 'language': ''} + self.result = { + # TODO: This encoding is not supported by Python. Should remove? + "encoding": "X-ISO-10646-UCS-4-2143", + "confidence": 1.0, + "language": "", + } elif byte_str.startswith((codecs.BOM_LE, codecs.BOM_BE)): # FF FE UTF-16, little endian BOM # FE FF UTF-16, big endian BOM - self.result = {'encoding': "UTF-16", - 'confidence': 1.0, - 'language': ''} + self.result = {"encoding": "UTF-16", "confidence": 1.0, "language": ""} self._got_data = True - if self.result['encoding'] is not None: + if self.result["encoding"] is not None: self.done = True return @@ -173,12 +219,29 @@ class UniversalDetector(object): if self._input_state == InputState.PURE_ASCII: if self.HIGH_BYTE_DETECTOR.search(byte_str): self._input_state = InputState.HIGH_BYTE - elif self._input_state == InputState.PURE_ASCII and \ - self.ESC_DETECTOR.search(self._last_char + byte_str): + elif ( + self._input_state == InputState.PURE_ASCII + and self.ESC_DETECTOR.search(self._last_char + byte_str) + ): self._input_state = InputState.ESC_ASCII self._last_char = byte_str[-1:] + # next we will look to see if it is appears to be either a UTF-16 or + # UTF-32 encoding + if not self._utf1632_prober: + self._utf1632_prober = UTF1632Prober() + + if self._utf1632_prober.state == ProbingState.DETECTING: + if self._utf1632_prober.feed(byte_str) == ProbingState.FOUND_IT: + self.result = { + "encoding": self._utf1632_prober.charset_name, + "confidence": self._utf1632_prober.get_confidence(), + "language": "", + } + self.done = True + return + # If we've seen escape sequences, use the EscCharSetProber, which # uses a simple state machine to check for known escape sequences in # HZ and ISO-2022 encodings, since those are the only encodings that @@ -187,12 +250,11 @@ class UniversalDetector(object): if not self._esc_charset_prober: self._esc_charset_prober = EscCharSetProber(self.lang_filter) if self._esc_charset_prober.feed(byte_str) == ProbingState.FOUND_IT: - self.result = {'encoding': - self._esc_charset_prober.charset_name, - 'confidence': - self._esc_charset_prober.get_confidence(), - 'language': - self._esc_charset_prober.language} + self.result = { + "encoding": self._esc_charset_prober.charset_name, + "confidence": self._esc_charset_prober.get_confidence(), + "language": self._esc_charset_prober.language, + } self.done = True # If we've seen high bytes (i.e., those with values greater than 127), # we need to do more complicated checks using all our multi-byte and @@ -207,17 +269,20 @@ class UniversalDetector(object): if self.lang_filter & LanguageFilter.NON_CJK: self._charset_probers.append(SBCSGroupProber()) self._charset_probers.append(Latin1Prober()) + self._charset_probers.append(MacRomanProber()) for prober in self._charset_probers: if prober.feed(byte_str) == ProbingState.FOUND_IT: - self.result = {'encoding': prober.charset_name, - 'confidence': prober.get_confidence(), - 'language': prober.language} + self.result = { + "encoding": prober.charset_name, + "confidence": prober.get_confidence(), + "language": prober.language, + } self.done = True break if self.WIN_BYTE_DETECTOR.search(byte_str): self._has_win_bytes = True - def close(self): + def close(self) -> ResultDict: """ Stop analyzing the current document and come up with a final 
prediction. @@ -231,13 +296,11 @@ class UniversalDetector(object): self.done = True if not self._got_data: - self.logger.debug('no data received!') + self.logger.debug("no data received!") # Default to ASCII if it is all we've seen so far elif self._input_state == InputState.PURE_ASCII: - self.result = {'encoding': 'ascii', - 'confidence': 1.0, - 'language': ''} + self.result = {"encoding": "ascii", "confidence": 1.0, "language": ""} # If we have seen non-ASCII, return the best that met MINIMUM_THRESHOLD elif self._input_state == InputState.HIGH_BYTE: @@ -253,34 +316,47 @@ class UniversalDetector(object): max_prober = prober if max_prober and (max_prober_confidence > self.MINIMUM_THRESHOLD): charset_name = max_prober.charset_name - lower_charset_name = max_prober.charset_name.lower() + assert charset_name is not None + lower_charset_name = charset_name.lower() confidence = max_prober.get_confidence() # Use Windows encoding name instead of ISO-8859 if we saw any # extra Windows-specific bytes - if lower_charset_name.startswith('iso-8859'): + if lower_charset_name.startswith("iso-8859"): if self._has_win_bytes: - charset_name = self.ISO_WIN_MAP.get(lower_charset_name, - charset_name) - self.result = {'encoding': charset_name, - 'confidence': confidence, - 'language': max_prober.language} + charset_name = self.ISO_WIN_MAP.get( + lower_charset_name, charset_name + ) + # Rename legacy encodings with superset encodings if asked + if self.should_rename_legacy: + charset_name = self.LEGACY_MAP.get( + (charset_name or "").lower(), charset_name + ) + self.result = { + "encoding": charset_name, + "confidence": confidence, + "language": max_prober.language, + } # Log all prober confidences if none met MINIMUM_THRESHOLD if self.logger.getEffectiveLevel() <= logging.DEBUG: - if self.result['encoding'] is None: - self.logger.debug('no probers hit minimum threshold') + if self.result["encoding"] is None: + self.logger.debug("no probers hit minimum threshold") for group_prober in self._charset_probers: if not group_prober: continue if isinstance(group_prober, CharSetGroupProber): for prober in group_prober.probers: - self.logger.debug('%s %s confidence = %s', - prober.charset_name, - prober.language, - prober.get_confidence()) + self.logger.debug( + "%s %s confidence = %s", + prober.charset_name, + prober.language, + prober.get_confidence(), + ) else: - self.logger.debug('%s %s confidence = %s', - group_prober.charset_name, - group_prober.language, - group_prober.get_confidence()) + self.logger.debug( + "%s %s confidence = %s", + group_prober.charset_name, + group_prober.language, + group_prober.get_confidence(), + ) return self.result diff --git a/env/Lib/site-packages/pip/_vendor/chardet/utf8prober.py b/env/Lib/site-packages/pip/_vendor/chardet/utf8prober.py index 6c3196cc2d7e46e6756580267f5643c6f7b448dd..d96354d97c2195320d0acc1717a5876eafbea2af 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/utf8prober.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/utf8prober.py @@ -25,45 +25,46 @@ # 02110-1301 USA ######################### END LICENSE BLOCK ######################### +from typing import Union + from .charsetprober import CharSetProber -from .enums import ProbingState, MachineState from .codingstatemachine import CodingStateMachine +from .enums import MachineState, ProbingState from .mbcssm import UTF8_SM_MODEL - class UTF8Prober(CharSetProber): ONE_CHAR_PROB = 0.5 - def __init__(self): - super(UTF8Prober, self).__init__() + def __init__(self) -> None: + super().__init__() self.coding_sm = 
CodingStateMachine(UTF8_SM_MODEL) - self._num_mb_chars = None + self._num_mb_chars = 0 self.reset() - def reset(self): - super(UTF8Prober, self).reset() + def reset(self) -> None: + super().reset() self.coding_sm.reset() self._num_mb_chars = 0 @property - def charset_name(self): + def charset_name(self) -> str: return "utf-8" @property - def language(self): + def language(self) -> str: return "" - def feed(self, byte_str): + def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState: for c in byte_str: coding_state = self.coding_sm.next_state(c) if coding_state == MachineState.ERROR: self._state = ProbingState.NOT_ME break - elif coding_state == MachineState.ITS_ME: + if coding_state == MachineState.ITS_ME: self._state = ProbingState.FOUND_IT break - elif coding_state == MachineState.START: + if coding_state == MachineState.START: if self.coding_sm.get_current_charlen() >= 2: self._num_mb_chars += 1 @@ -73,10 +74,9 @@ class UTF8Prober(CharSetProber): return self.state - def get_confidence(self): + def get_confidence(self) -> float: unlike = 0.99 if self._num_mb_chars < 6: - unlike *= self.ONE_CHAR_PROB ** self._num_mb_chars + unlike *= self.ONE_CHAR_PROB**self._num_mb_chars return 1.0 - unlike - else: - return unlike + return unlike diff --git a/env/Lib/site-packages/pip/_vendor/chardet/version.py b/env/Lib/site-packages/pip/_vendor/chardet/version.py index 70369b9d663414c24f1e042c5b30a8f8c7bbd2b2..c5e9d85cd75884b129d4ab8d0453c0e50d0c1f68 100644 --- a/env/Lib/site-packages/pip/_vendor/chardet/version.py +++ b/env/Lib/site-packages/pip/_vendor/chardet/version.py @@ -1,9 +1,9 @@ """ This module exists only to simplify retrieving the version number of chardet -from within setup.py and from chardet subpackages. +from within setuptools and from chardet subpackages. :author: Dan Blanchard (dan.blanchard@gmail.com) """ -__version__ = "4.0.0" -VERSION = __version__.split('.') +__version__ = "5.1.0" +VERSION = __version__.split(".") diff --git a/env/Lib/site-packages/pip/_vendor/colorama/__init__.py b/env/Lib/site-packages/pip/_vendor/colorama/__init__.py index b149ed79b0a1d5808a7e392876c2f5aae4b5057c..383101cdb38706c305449674044e9288b92b7d75 100644 --- a/env/Lib/site-packages/pip/_vendor/colorama/__init__.py +++ b/env/Lib/site-packages/pip/_vendor/colorama/__init__.py @@ -1,6 +1,7 @@ # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. 
-from .initialise import init, deinit, reinit, colorama_text +from .initialise import init, deinit, reinit, colorama_text, just_fix_windows_console from .ansi import Fore, Back, Style, Cursor from .ansitowin32 import AnsiToWin32 -__version__ = '0.4.4' +__version__ = '0.4.6' + diff --git a/env/Lib/site-packages/pip/_vendor/colorama/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/colorama/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index bd8cbdaf1421633e7f2105a59c4780d29403dc3b..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/colorama/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/colorama/__pycache__/ansi.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/colorama/__pycache__/ansi.cpython-39.pyc deleted file mode 100644 index 289921d1e1e4cdb0a890634e8f0fc739658ba064..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/colorama/__pycache__/ansi.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/colorama/__pycache__/ansitowin32.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/colorama/__pycache__/ansitowin32.cpython-39.pyc deleted file mode 100644 index 5a1d0f29b5e7600235db474f382655bfbeeea474..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/colorama/__pycache__/ansitowin32.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/colorama/__pycache__/initialise.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/colorama/__pycache__/initialise.cpython-39.pyc deleted file mode 100644 index afa6b47b1ecd18f626bde53106251302c2f2b69d..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/colorama/__pycache__/initialise.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/colorama/__pycache__/win32.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/colorama/__pycache__/win32.cpython-39.pyc deleted file mode 100644 index 0c8709f0ac9c87dfdfa044124483cfb128d9b41e..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/colorama/__pycache__/win32.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/colorama/__pycache__/winterm.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/colorama/__pycache__/winterm.cpython-39.pyc deleted file mode 100644 index 958e35ccec9b08bd312a543e7c196a74e518634f..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/colorama/__pycache__/winterm.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/colorama/ansitowin32.py b/env/Lib/site-packages/pip/_vendor/colorama/ansitowin32.py index 6039a0543204739849335ad27894cf64224ad828..abf209e60c7c4a9b1ae57452e36b383969848c2e 100644 --- a/env/Lib/site-packages/pip/_vendor/colorama/ansitowin32.py +++ b/env/Lib/site-packages/pip/_vendor/colorama/ansitowin32.py @@ -4,7 +4,7 @@ import sys import os from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style, BEL -from .winterm import WinTerm, WinColor, WinStyle +from .winterm import enable_vt_processing, WinTerm, WinColor, WinStyle from .win32 import windll, winapi_test @@ -37,6 +37,12 @@ class StreamWrapper(object): def __exit__(self, *args, **kwargs): return self.__wrapped.__exit__(*args, **kwargs) + def __setstate__(self, state): + self.__dict__ = state + + def __getstate__(self): + return self.__dict__ + def write(self, 
text): self.__convertor.write(text) @@ -57,7 +63,9 @@ class StreamWrapper(object): stream = self.__wrapped try: return stream.closed - except AttributeError: + # AttributeError in the case that the stream doesn't support being closed + # ValueError for the case that the stream has already been detached when atexit runs + except (AttributeError, ValueError): return True @@ -86,15 +94,22 @@ class AnsiToWin32(object): # (e.g. Cygwin Terminal). In this case it's up to the terminal # to support the ANSI codes. conversion_supported = on_windows and winapi_test() + try: + fd = wrapped.fileno() + except Exception: + fd = -1 + system_has_native_ansi = not on_windows or enable_vt_processing(fd) + have_tty = not self.stream.closed and self.stream.isatty() + need_conversion = conversion_supported and not system_has_native_ansi # should we strip ANSI sequences from our output? if strip is None: - strip = conversion_supported or (not self.stream.closed and not self.stream.isatty()) + strip = need_conversion or not have_tty self.strip = strip # should we should convert ANSI sequences into win32 calls? if convert is None: - convert = conversion_supported and not self.stream.closed and self.stream.isatty() + convert = need_conversion and have_tty self.convert = convert # dict of ansi codes to win32 functions and parameters @@ -256,3 +271,7 @@ class AnsiToWin32(object): if params[0] in '02': winterm.set_title(params[1]) return text + + + def flush(self): + self.wrapped.flush() diff --git a/env/Lib/site-packages/pip/_vendor/colorama/initialise.py b/env/Lib/site-packages/pip/_vendor/colorama/initialise.py index 430d0668727cd0521270a52c962867907d743b34..d5fd4b71fed1bb4871717f978f0c470280f099c1 100644 --- a/env/Lib/site-packages/pip/_vendor/colorama/initialise.py +++ b/env/Lib/site-packages/pip/_vendor/colorama/initialise.py @@ -6,13 +6,27 @@ import sys from .ansitowin32 import AnsiToWin32 -orig_stdout = None -orig_stderr = None +def _wipe_internal_state_for_tests(): + global orig_stdout, orig_stderr + orig_stdout = None + orig_stderr = None + + global wrapped_stdout, wrapped_stderr + wrapped_stdout = None + wrapped_stderr = None -wrapped_stdout = None -wrapped_stderr = None + global atexit_done + atexit_done = False + + global fixed_windows_console + fixed_windows_console = False -atexit_done = False + try: + # no-op if it wasn't registered + atexit.unregister(reset_all) + except AttributeError: + # python 2: no atexit.unregister. Oh well, we did our best. + pass def reset_all(): @@ -55,6 +69,29 @@ def deinit(): sys.stderr = orig_stderr +def just_fix_windows_console(): + global fixed_windows_console + + if sys.platform != "win32": + return + if fixed_windows_console: + return + if wrapped_stdout is not None or wrapped_stderr is not None: + # Someone already ran init() and it did stuff, so we won't second-guess them + return + + # On newer versions of Windows, AnsiToWin32.__init__ will implicitly enable the + # native ANSI support in the console as a side-effect. We only need to actually + # replace sys.stdout/stderr if we're in the old-style conversion mode. 
+ new_stdout = AnsiToWin32(sys.stdout, convert=None, strip=None, autoreset=False) + if new_stdout.convert: + sys.stdout = new_stdout + new_stderr = AnsiToWin32(sys.stderr, convert=None, strip=None, autoreset=False) + if new_stderr.convert: + sys.stderr = new_stderr + + fixed_windows_console = True + @contextlib.contextmanager def colorama_text(*args, **kwargs): init(*args, **kwargs) @@ -78,3 +115,7 @@ def wrap_stream(stream, convert, strip, autoreset, wrap): if wrapper.should_wrap(): stream = wrapper.stream return stream + + +# Use this for initial setup as well, to reduce code duplication +_wipe_internal_state_for_tests() diff --git a/env/Lib/site-packages/pip/_vendor/colorama/win32.py b/env/Lib/site-packages/pip/_vendor/colorama/win32.py index c2d836033673993e00a02d5a3802b61cd051cf08..841b0e270a381cdfaca544a9be976d7276d83b1e 100644 --- a/env/Lib/site-packages/pip/_vendor/colorama/win32.py +++ b/env/Lib/site-packages/pip/_vendor/colorama/win32.py @@ -4,6 +4,8 @@ STDOUT = -11 STDERR = -12 +ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004 + try: import ctypes from ctypes import LibraryLoader @@ -89,6 +91,20 @@ else: ] _SetConsoleTitleW.restype = wintypes.BOOL + _GetConsoleMode = windll.kernel32.GetConsoleMode + _GetConsoleMode.argtypes = [ + wintypes.HANDLE, + POINTER(wintypes.DWORD) + ] + _GetConsoleMode.restype = wintypes.BOOL + + _SetConsoleMode = windll.kernel32.SetConsoleMode + _SetConsoleMode.argtypes = [ + wintypes.HANDLE, + wintypes.DWORD + ] + _SetConsoleMode.restype = wintypes.BOOL + def _winapi_test(handle): csbi = CONSOLE_SCREEN_BUFFER_INFO() success = _GetConsoleScreenBufferInfo( @@ -150,3 +166,15 @@ else: def SetConsoleTitle(title): return _SetConsoleTitleW(title) + + def GetConsoleMode(handle): + mode = wintypes.DWORD() + success = _GetConsoleMode(handle, byref(mode)) + if not success: + raise ctypes.WinError() + return mode.value + + def SetConsoleMode(handle, mode): + success = _SetConsoleMode(handle, mode) + if not success: + raise ctypes.WinError() diff --git a/env/Lib/site-packages/pip/_vendor/colorama/winterm.py b/env/Lib/site-packages/pip/_vendor/colorama/winterm.py index 0fdb4ec4e91090876dc3fbf207049b521fa0dd73..aad867e8c80b826bf6a060116f17fa08a8eb0765 100644 --- a/env/Lib/site-packages/pip/_vendor/colorama/winterm.py +++ b/env/Lib/site-packages/pip/_vendor/colorama/winterm.py @@ -1,6 +1,12 @@ # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. -from . import win32 +try: + from msvcrt import get_osfhandle +except ImportError: + def get_osfhandle(_): + raise OSError("This isn't windows!") + +from . 
import win32 # from wincon.h class WinColor(object): @@ -167,3 +173,23 @@ class WinTerm(object): def set_title(self, title): win32.SetConsoleTitle(title) + + +def enable_vt_processing(fd): + if win32.windll is None or not win32.winapi_test(): + return False + + try: + handle = get_osfhandle(fd) + mode = win32.GetConsoleMode(handle) + win32.SetConsoleMode( + handle, + mode | win32.ENABLE_VIRTUAL_TERMINAL_PROCESSING, + ) + + mode = win32.GetConsoleMode(handle) + if mode & win32.ENABLE_VIRTUAL_TERMINAL_PROCESSING: + return True + # Can get TypeError in testsuite where 'fd' is a Mock() + except (OSError, TypeError): + return False diff --git a/env/Lib/site-packages/pip/_vendor/distlib/__init__.py b/env/Lib/site-packages/pip/_vendor/distlib/__init__.py index 63d916e345b6919a801c253ac7f8f852a5608965..962173c8d0a6906b59f2910c9cae759010534786 100644 --- a/env/Lib/site-packages/pip/_vendor/distlib/__init__.py +++ b/env/Lib/site-packages/pip/_vendor/distlib/__init__.py @@ -1,12 +1,12 @@ # -*- coding: utf-8 -*- # -# Copyright (C) 2012-2019 Vinay Sajip. +# Copyright (C) 2012-2022 Vinay Sajip. # Licensed to the Python Software Foundation under a contributor agreement. # See LICENSE.txt and CONTRIBUTORS.txt. # import logging -__version__ = '0.3.1' +__version__ = '0.3.6' class DistlibException(Exception): pass diff --git a/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 26fe7876846643be9ad59f28927ee4e43dc81a5d..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/compat.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/compat.cpython-39.pyc deleted file mode 100644 index 41b0909f21b95625ab9c3a5a93e9b92145f3754d..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/compat.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/database.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/database.cpython-39.pyc deleted file mode 100644 index 912b3ee2d9dd1e4bc7da4e21c551ce45c0546ec1..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/database.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/index.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/index.cpython-39.pyc deleted file mode 100644 index 9665374b7abc14d7f9b2570423b6a7eb9e68ba16..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/index.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/locators.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/locators.cpython-39.pyc deleted file mode 100644 index 0becc23bf9eef5df250973155261d3ffbe49934a..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/locators.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/manifest.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/manifest.cpython-39.pyc deleted file mode 100644 index 
2aa069a2d60f9d2bac22989becf8ef4642702fab..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/manifest.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/markers.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/markers.cpython-39.pyc deleted file mode 100644 index 97123f12612c5c75856379337b9c09c8e05957a0..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/markers.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/metadata.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/metadata.cpython-39.pyc deleted file mode 100644 index 14140c9e7b7cdb11ccdd68e4dea3a443d048f5e3..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/metadata.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/resources.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/resources.cpython-39.pyc deleted file mode 100644 index 8d057ac273a36f696a6677b930d9ed15a3e5d43a..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/resources.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/scripts.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/scripts.cpython-39.pyc deleted file mode 100644 index d2bafdbba247cdcd44b5b5c68612d281ca1d7436..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/scripts.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/util.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/util.cpython-39.pyc deleted file mode 100644 index 22a066ee2fa540b79e7054b5e3c8c25d3e684f7f..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/util.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/version.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/version.cpython-39.pyc deleted file mode 100644 index c97c1bf37a7349062ebe174707b8fa0b26df4e40..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/version.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/wheel.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/wheel.cpython-39.pyc deleted file mode 100644 index 7f8f24b6fb063e31e3d632d990e4a98405483d5c..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/distlib/__pycache__/wheel.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/distlib/_backport/__init__.py b/env/Lib/site-packages/pip/_vendor/distlib/_backport/__init__.py deleted file mode 100644 index f7dbf4c9aa8314816f9bcbe5357146369ee71391..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/distlib/_backport/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -"""Modules copied from Python 3 standard libraries, for internal use only. - -Individual classes and functions are found in d2._backport.misc. 
Intended -usage is to always import things missing from 3.1 from that module: the -built-in/stdlib objects will be used if found. -""" diff --git a/env/Lib/site-packages/pip/_vendor/distlib/_backport/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/distlib/_backport/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 2d3302e2a94dfc0aeb36363d66d4ae1a5bc139ad..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/distlib/_backport/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/distlib/_backport/__pycache__/misc.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/distlib/_backport/__pycache__/misc.cpython-39.pyc deleted file mode 100644 index 8dd32bbd50bc42f64a21559f56a58dd142c39776..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/distlib/_backport/__pycache__/misc.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/distlib/_backport/__pycache__/shutil.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/distlib/_backport/__pycache__/shutil.cpython-39.pyc deleted file mode 100644 index 6276239783ad31ee70781dd0ba8b38c5ae70d506..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/distlib/_backport/__pycache__/shutil.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/distlib/_backport/__pycache__/sysconfig.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/distlib/_backport/__pycache__/sysconfig.cpython-39.pyc deleted file mode 100644 index 605f0df3d550f1f368c94f5d5ce6f660fc318f3f..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/distlib/_backport/__pycache__/sysconfig.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/distlib/_backport/__pycache__/tarfile.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/distlib/_backport/__pycache__/tarfile.cpython-39.pyc deleted file mode 100644 index 81af67a91a823524ed75c3f3cc508531e7e39f6c..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/distlib/_backport/__pycache__/tarfile.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/distlib/_backport/misc.py b/env/Lib/site-packages/pip/_vendor/distlib/_backport/misc.py deleted file mode 100644 index cfb318d34f761cde906d8b02dde3f9329688b8c6..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/distlib/_backport/misc.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (C) 2012 The Python Software Foundation. -# See LICENSE.txt and CONTRIBUTORS.txt. 
-# -"""Backports for individual classes and functions.""" - -import os -import sys - -__all__ = ['cache_from_source', 'callable', 'fsencode'] - - -try: - from imp import cache_from_source -except ImportError: - def cache_from_source(py_file, debug=__debug__): - ext = debug and 'c' or 'o' - return py_file + ext - - -try: - callable = callable -except NameError: - from collections import Callable - - def callable(obj): - return isinstance(obj, Callable) - - -try: - fsencode = os.fsencode -except AttributeError: - def fsencode(filename): - if isinstance(filename, bytes): - return filename - elif isinstance(filename, str): - return filename.encode(sys.getfilesystemencoding()) - else: - raise TypeError("expect bytes or str, not %s" % - type(filename).__name__) diff --git a/env/Lib/site-packages/pip/_vendor/distlib/_backport/shutil.py b/env/Lib/site-packages/pip/_vendor/distlib/_backport/shutil.py deleted file mode 100644 index 10ed362539718aed693f8155ce7ad55c64163aff..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/distlib/_backport/shutil.py +++ /dev/null @@ -1,764 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (C) 2012 The Python Software Foundation. -# See LICENSE.txt and CONTRIBUTORS.txt. -# -"""Utility functions for copying and archiving files and directory trees. - -XXX The functions here don't copy the resource fork or other metadata on Mac. - -""" - -import os -import sys -import stat -from os.path import abspath -import fnmatch -try: - from collections.abc import Callable -except ImportError: - from collections import Callable -import errno -from . import tarfile - -try: - import bz2 - _BZ2_SUPPORTED = True -except ImportError: - _BZ2_SUPPORTED = False - -try: - from pwd import getpwnam -except ImportError: - getpwnam = None - -try: - from grp import getgrnam -except ImportError: - getgrnam = None - -__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2", - "copytree", "move", "rmtree", "Error", "SpecialFileError", - "ExecError", "make_archive", "get_archive_formats", - "register_archive_format", "unregister_archive_format", - "get_unpack_formats", "register_unpack_format", - "unregister_unpack_format", "unpack_archive", "ignore_patterns"] - -class Error(EnvironmentError): - pass - -class SpecialFileError(EnvironmentError): - """Raised when trying to do a kind of operation (e.g. copying) which is - not supported on a special file (e.g. a named pipe)""" - -class ExecError(EnvironmentError): - """Raised when a command could not be executed""" - -class ReadError(EnvironmentError): - """Raised when an archive cannot be read""" - -class RegistryError(Exception): - """Raised when a registry operation with the archiving - and unpacking registries fails""" - - -try: - WindowsError -except NameError: - WindowsError = None - -def copyfileobj(fsrc, fdst, length=16*1024): - """copy data from file-like object fsrc to file-like object fdst""" - while 1: - buf = fsrc.read(length) - if not buf: - break - fdst.write(buf) - -def _samefile(src, dst): - # Macintosh, Unix. - if hasattr(os.path, 'samefile'): - try: - return os.path.samefile(src, dst) - except OSError: - return False - - # All other platforms: check for same pathname. 
- return (os.path.normcase(os.path.abspath(src)) == - os.path.normcase(os.path.abspath(dst))) - -def copyfile(src, dst): - """Copy data from src to dst""" - if _samefile(src, dst): - raise Error("`%s` and `%s` are the same file" % (src, dst)) - - for fn in [src, dst]: - try: - st = os.stat(fn) - except OSError: - # File most likely does not exist - pass - else: - # XXX What about other special files? (sockets, devices...) - if stat.S_ISFIFO(st.st_mode): - raise SpecialFileError("`%s` is a named pipe" % fn) - - with open(src, 'rb') as fsrc: - with open(dst, 'wb') as fdst: - copyfileobj(fsrc, fdst) - -def copymode(src, dst): - """Copy mode bits from src to dst""" - if hasattr(os, 'chmod'): - st = os.stat(src) - mode = stat.S_IMODE(st.st_mode) - os.chmod(dst, mode) - -def copystat(src, dst): - """Copy all stat info (mode bits, atime, mtime, flags) from src to dst""" - st = os.stat(src) - mode = stat.S_IMODE(st.st_mode) - if hasattr(os, 'utime'): - os.utime(dst, (st.st_atime, st.st_mtime)) - if hasattr(os, 'chmod'): - os.chmod(dst, mode) - if hasattr(os, 'chflags') and hasattr(st, 'st_flags'): - try: - os.chflags(dst, st.st_flags) - except OSError as why: - if (not hasattr(errno, 'EOPNOTSUPP') or - why.errno != errno.EOPNOTSUPP): - raise - -def copy(src, dst): - """Copy data and mode bits ("cp src dst"). - - The destination may be a directory. - - """ - if os.path.isdir(dst): - dst = os.path.join(dst, os.path.basename(src)) - copyfile(src, dst) - copymode(src, dst) - -def copy2(src, dst): - """Copy data and all stat info ("cp -p src dst"). - - The destination may be a directory. - - """ - if os.path.isdir(dst): - dst = os.path.join(dst, os.path.basename(src)) - copyfile(src, dst) - copystat(src, dst) - -def ignore_patterns(*patterns): - """Function that can be used as copytree() ignore parameter. - - Patterns is a sequence of glob-style patterns - that are used to exclude files""" - def _ignore_patterns(path, names): - ignored_names = [] - for pattern in patterns: - ignored_names.extend(fnmatch.filter(names, pattern)) - return set(ignored_names) - return _ignore_patterns - -def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2, - ignore_dangling_symlinks=False): - """Recursively copy a directory tree. - - The destination directory must not already exist. - If exception(s) occur, an Error is raised with a list of reasons. - - If the optional symlinks flag is true, symbolic links in the - source tree result in symbolic links in the destination tree; if - it is false, the contents of the files pointed to by symbolic - links are copied. If the file pointed by the symlink doesn't - exist, an exception will be added in the list of errors raised in - an Error exception at the end of the copy process. - - You can set the optional ignore_dangling_symlinks flag to true if you - want to silence this exception. Notice that this has no effect on - platforms that don't support os.symlink. - - The optional ignore argument is a callable. If given, it - is called with the `src` parameter, which is the directory - being visited by copytree(), and `names` which is the list of - `src` contents, as returned by os.listdir(): - - callable(src, names) -> ignored_names - - Since copytree() is called recursively, the callable will be - called once for each directory that is copied. It returns a - list of names relative to the `src` directory that should - not be copied. - - The optional copy_function argument is a callable that will be used - to copy each file. 
It will be called with the source path and the - destination path as arguments. By default, copy2() is used, but any - function that supports the same signature (like copy()) can be used. - - """ - names = os.listdir(src) - if ignore is not None: - ignored_names = ignore(src, names) - else: - ignored_names = set() - - os.makedirs(dst) - errors = [] - for name in names: - if name in ignored_names: - continue - srcname = os.path.join(src, name) - dstname = os.path.join(dst, name) - try: - if os.path.islink(srcname): - linkto = os.readlink(srcname) - if symlinks: - os.symlink(linkto, dstname) - else: - # ignore dangling symlink if the flag is on - if not os.path.exists(linkto) and ignore_dangling_symlinks: - continue - # otherwise let the copy occurs. copy2 will raise an error - copy_function(srcname, dstname) - elif os.path.isdir(srcname): - copytree(srcname, dstname, symlinks, ignore, copy_function) - else: - # Will raise a SpecialFileError for unsupported file types - copy_function(srcname, dstname) - # catch the Error from the recursive copytree so that we can - # continue with other files - except Error as err: - errors.extend(err.args[0]) - except EnvironmentError as why: - errors.append((srcname, dstname, str(why))) - try: - copystat(src, dst) - except OSError as why: - if WindowsError is not None and isinstance(why, WindowsError): - # Copying file access times may fail on Windows - pass - else: - errors.extend((src, dst, str(why))) - if errors: - raise Error(errors) - -def rmtree(path, ignore_errors=False, onerror=None): - """Recursively delete a directory tree. - - If ignore_errors is set, errors are ignored; otherwise, if onerror - is set, it is called to handle the error with arguments (func, - path, exc_info) where func is os.listdir, os.remove, or os.rmdir; - path is the argument to that function that caused it to fail; and - exc_info is a tuple returned by sys.exc_info(). If ignore_errors - is false and onerror is None, an exception is raised. - - """ - if ignore_errors: - def onerror(*args): - pass - elif onerror is None: - def onerror(*args): - raise - try: - if os.path.islink(path): - # symlinks to directories are forbidden, see bug #1669 - raise OSError("Cannot call rmtree on a symbolic link") - except OSError: - onerror(os.path.islink, path, sys.exc_info()) - # can't continue even if onerror hook returns - return - names = [] - try: - names = os.listdir(path) - except os.error: - onerror(os.listdir, path, sys.exc_info()) - for name in names: - fullname = os.path.join(path, name) - try: - mode = os.lstat(fullname).st_mode - except os.error: - mode = 0 - if stat.S_ISDIR(mode): - rmtree(fullname, ignore_errors, onerror) - else: - try: - os.remove(fullname) - except os.error: - onerror(os.remove, fullname, sys.exc_info()) - try: - os.rmdir(path) - except os.error: - onerror(os.rmdir, path, sys.exc_info()) - - -def _basename(path): - # A basename() variant which first strips the trailing slash, if present. - # Thus we always get the last component of the path, even for directories. - return os.path.basename(path.rstrip(os.path.sep)) - -def move(src, dst): - """Recursively move a file or directory to another location. This is - similar to the Unix "mv" command. - - If the destination is a directory or a symlink to a directory, the source - is moved inside the directory. The destination path must not already - exist. - - If the destination already exists but is not a directory, it may be - overwritten depending on os.rename() semantics. 
- - If the destination is on our current filesystem, then rename() is used. - Otherwise, src is copied to the destination and then removed. - A lot more could be done here... A look at a mv.c shows a lot of - the issues this implementation glosses over. - - """ - real_dst = dst - if os.path.isdir(dst): - if _samefile(src, dst): - # We might be on a case insensitive filesystem, - # perform the rename anyway. - os.rename(src, dst) - return - - real_dst = os.path.join(dst, _basename(src)) - if os.path.exists(real_dst): - raise Error("Destination path '%s' already exists" % real_dst) - try: - os.rename(src, real_dst) - except OSError: - if os.path.isdir(src): - if _destinsrc(src, dst): - raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst)) - copytree(src, real_dst, symlinks=True) - rmtree(src) - else: - copy2(src, real_dst) - os.unlink(src) - -def _destinsrc(src, dst): - src = abspath(src) - dst = abspath(dst) - if not src.endswith(os.path.sep): - src += os.path.sep - if not dst.endswith(os.path.sep): - dst += os.path.sep - return dst.startswith(src) - -def _get_gid(name): - """Returns a gid, given a group name.""" - if getgrnam is None or name is None: - return None - try: - result = getgrnam(name) - except KeyError: - result = None - if result is not None: - return result[2] - return None - -def _get_uid(name): - """Returns an uid, given a user name.""" - if getpwnam is None or name is None: - return None - try: - result = getpwnam(name) - except KeyError: - result = None - if result is not None: - return result[2] - return None - -def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0, - owner=None, group=None, logger=None): - """Create a (possibly compressed) tar file from all the files under - 'base_dir'. - - 'compress' must be "gzip" (the default), "bzip2", or None. - - 'owner' and 'group' can be used to define an owner and a group for the - archive that is being built. If not provided, the current owner and group - will be used. - - The output tar file will be named 'base_name' + ".tar", possibly plus - the appropriate compression extension (".gz", or ".bz2"). - - Returns the output filename. 
- """ - tar_compression = {'gzip': 'gz', None: ''} - compress_ext = {'gzip': '.gz'} - - if _BZ2_SUPPORTED: - tar_compression['bzip2'] = 'bz2' - compress_ext['bzip2'] = '.bz2' - - # flags for compression program, each element of list will be an argument - if compress is not None and compress not in compress_ext: - raise ValueError("bad value for 'compress', or compression format not " - "supported : {0}".format(compress)) - - archive_name = base_name + '.tar' + compress_ext.get(compress, '') - archive_dir = os.path.dirname(archive_name) - - if not os.path.exists(archive_dir): - if logger is not None: - logger.info("creating %s", archive_dir) - if not dry_run: - os.makedirs(archive_dir) - - # creating the tarball - if logger is not None: - logger.info('Creating tar archive') - - uid = _get_uid(owner) - gid = _get_gid(group) - - def _set_uid_gid(tarinfo): - if gid is not None: - tarinfo.gid = gid - tarinfo.gname = group - if uid is not None: - tarinfo.uid = uid - tarinfo.uname = owner - return tarinfo - - if not dry_run: - tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress]) - try: - tar.add(base_dir, filter=_set_uid_gid) - finally: - tar.close() - - return archive_name - -def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False): - # XXX see if we want to keep an external call here - if verbose: - zipoptions = "-r" - else: - zipoptions = "-rq" - from distutils.errors import DistutilsExecError - from distutils.spawn import spawn - try: - spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run) - except DistutilsExecError: - # XXX really should distinguish between "couldn't find - # external 'zip' command" and "zip failed". - raise ExecError("unable to create zip file '%s': " - "could neither import the 'zipfile' module nor " - "find a standalone zip utility") % zip_filename - -def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None): - """Create a zip file from all the files under 'base_dir'. - - The output zip file will be named 'base_name' + ".zip". Uses either the - "zipfile" Python module (if available) or the InfoZIP "zip" utility - (if installed and found on the default search path). If neither tool is - available, raises ExecError. Returns the name of the output zip - file. - """ - zip_filename = base_name + ".zip" - archive_dir = os.path.dirname(base_name) - - if not os.path.exists(archive_dir): - if logger is not None: - logger.info("creating %s", archive_dir) - if not dry_run: - os.makedirs(archive_dir) - - # If zipfile module is not available, try spawning an external 'zip' - # command. 
- try: - import zipfile - except ImportError: - zipfile = None - - if zipfile is None: - _call_external_zip(base_dir, zip_filename, verbose, dry_run) - else: - if logger is not None: - logger.info("creating '%s' and adding '%s' to it", - zip_filename, base_dir) - - if not dry_run: - zip = zipfile.ZipFile(zip_filename, "w", - compression=zipfile.ZIP_DEFLATED) - - for dirpath, dirnames, filenames in os.walk(base_dir): - for name in filenames: - path = os.path.normpath(os.path.join(dirpath, name)) - if os.path.isfile(path): - zip.write(path, path) - if logger is not None: - logger.info("adding '%s'", path) - zip.close() - - return zip_filename - -_ARCHIVE_FORMATS = { - 'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"), - 'bztar': (_make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"), - 'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"), - 'zip': (_make_zipfile, [], "ZIP file"), - } - -if _BZ2_SUPPORTED: - _ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')], - "bzip2'ed tar-file") - -def get_archive_formats(): - """Returns a list of supported formats for archiving and unarchiving. - - Each element of the returned sequence is a tuple (name, description) - """ - formats = [(name, registry[2]) for name, registry in - _ARCHIVE_FORMATS.items()] - formats.sort() - return formats - -def register_archive_format(name, function, extra_args=None, description=''): - """Registers an archive format. - - name is the name of the format. function is the callable that will be - used to create archives. If provided, extra_args is a sequence of - (name, value) tuples that will be passed as arguments to the callable. - description can be provided to describe the format, and will be returned - by the get_archive_formats() function. - """ - if extra_args is None: - extra_args = [] - if not isinstance(function, Callable): - raise TypeError('The %s object is not callable' % function) - if not isinstance(extra_args, (tuple, list)): - raise TypeError('extra_args needs to be a sequence') - for element in extra_args: - if not isinstance(element, (tuple, list)) or len(element) !=2: - raise TypeError('extra_args elements are : (arg_name, value)') - - _ARCHIVE_FORMATS[name] = (function, extra_args, description) - -def unregister_archive_format(name): - del _ARCHIVE_FORMATS[name] - -def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0, - dry_run=0, owner=None, group=None, logger=None): - """Create an archive file (eg. zip or tar). - - 'base_name' is the name of the file to create, minus any format-specific - extension; 'format' is the archive format: one of "zip", "tar", "bztar" - or "gztar". - - 'root_dir' is a directory that will be the root directory of the - archive; ie. we typically chdir into 'root_dir' before creating the - archive. 'base_dir' is the directory where we start archiving from; - ie. 'base_dir' will be the common prefix of all files and - directories in the archive. 'root_dir' and 'base_dir' both default - to the current directory. Returns the name of the archive file. - - 'owner' and 'group' are used when creating a tar archive. By default, - uses the current owner and group. 
- """ - save_cwd = os.getcwd() - if root_dir is not None: - if logger is not None: - logger.debug("changing into '%s'", root_dir) - base_name = os.path.abspath(base_name) - if not dry_run: - os.chdir(root_dir) - - if base_dir is None: - base_dir = os.curdir - - kwargs = {'dry_run': dry_run, 'logger': logger} - - try: - format_info = _ARCHIVE_FORMATS[format] - except KeyError: - raise ValueError("unknown archive format '%s'" % format) - - func = format_info[0] - for arg, val in format_info[1]: - kwargs[arg] = val - - if format != 'zip': - kwargs['owner'] = owner - kwargs['group'] = group - - try: - filename = func(base_name, base_dir, **kwargs) - finally: - if root_dir is not None: - if logger is not None: - logger.debug("changing back to '%s'", save_cwd) - os.chdir(save_cwd) - - return filename - - -def get_unpack_formats(): - """Returns a list of supported formats for unpacking. - - Each element of the returned sequence is a tuple - (name, extensions, description) - """ - formats = [(name, info[0], info[3]) for name, info in - _UNPACK_FORMATS.items()] - formats.sort() - return formats - -def _check_unpack_options(extensions, function, extra_args): - """Checks what gets registered as an unpacker.""" - # first make sure no other unpacker is registered for this extension - existing_extensions = {} - for name, info in _UNPACK_FORMATS.items(): - for ext in info[0]: - existing_extensions[ext] = name - - for extension in extensions: - if extension in existing_extensions: - msg = '%s is already registered for "%s"' - raise RegistryError(msg % (extension, - existing_extensions[extension])) - - if not isinstance(function, Callable): - raise TypeError('The registered function must be a callable') - - -def register_unpack_format(name, extensions, function, extra_args=None, - description=''): - """Registers an unpack format. - - `name` is the name of the format. `extensions` is a list of extensions - corresponding to the format. - - `function` is the callable that will be - used to unpack archives. The callable will receive archives to unpack. - If it's unable to handle an archive, it needs to raise a ReadError - exception. - - If provided, `extra_args` is a sequence of - (name, value) tuples that will be passed as arguments to the callable. - description can be provided to describe the format, and will be returned - by the get_unpack_formats() function. - """ - if extra_args is None: - extra_args = [] - _check_unpack_options(extensions, function, extra_args) - _UNPACK_FORMATS[name] = extensions, function, extra_args, description - -def unregister_unpack_format(name): - """Removes the pack format from the registry.""" - del _UNPACK_FORMATS[name] - -def _ensure_directory(path): - """Ensure that the parent directory of `path` exists""" - dirname = os.path.dirname(path) - if not os.path.isdir(dirname): - os.makedirs(dirname) - -def _unpack_zipfile(filename, extract_dir): - """Unpack zip `filename` to `extract_dir` - """ - try: - import zipfile - except ImportError: - raise ReadError('zlib not supported, cannot unpack this archive.') - - if not zipfile.is_zipfile(filename): - raise ReadError("%s is not a zip file" % filename) - - zip = zipfile.ZipFile(filename) - try: - for info in zip.infolist(): - name = info.filename - - # don't extract absolute paths or ones with .. in them - if name.startswith('/') or '..' 
in name: - continue - - target = os.path.join(extract_dir, *name.split('/')) - if not target: - continue - - _ensure_directory(target) - if not name.endswith('/'): - # file - data = zip.read(info.filename) - f = open(target, 'wb') - try: - f.write(data) - finally: - f.close() - del data - finally: - zip.close() - -def _unpack_tarfile(filename, extract_dir): - """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir` - """ - try: - tarobj = tarfile.open(filename) - except tarfile.TarError: - raise ReadError( - "%s is not a compressed or uncompressed tar file" % filename) - try: - tarobj.extractall(extract_dir) - finally: - tarobj.close() - -_UNPACK_FORMATS = { - 'gztar': (['.tar.gz', '.tgz'], _unpack_tarfile, [], "gzip'ed tar-file"), - 'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"), - 'zip': (['.zip'], _unpack_zipfile, [], "ZIP file") - } - -if _BZ2_SUPPORTED: - _UNPACK_FORMATS['bztar'] = (['.bz2'], _unpack_tarfile, [], - "bzip2'ed tar-file") - -def _find_unpack_format(filename): - for name, info in _UNPACK_FORMATS.items(): - for extension in info[0]: - if filename.endswith(extension): - return name - return None - -def unpack_archive(filename, extract_dir=None, format=None): - """Unpack an archive. - - `filename` is the name of the archive. - - `extract_dir` is the name of the target directory, where the archive - is unpacked. If not provided, the current working directory is used. - - `format` is the archive format: one of "zip", "tar", or "gztar". Or any - other registered format. If not provided, unpack_archive will use the - filename extension and see if an unpacker was registered for that - extension. - - In case none is found, a ValueError is raised. - """ - if extract_dir is None: - extract_dir = os.getcwd() - - if format is not None: - try: - format_info = _UNPACK_FORMATS[format] - except KeyError: - raise ValueError("Unknown unpack format '{0}'".format(format)) - - func = format_info[1] - func(filename, extract_dir, **dict(format_info[2])) - else: - # we need to look at the registered unpackers supported extensions - format = _find_unpack_format(filename) - if format is None: - raise ReadError("Unknown archive format '{0}'".format(filename)) - - func = _UNPACK_FORMATS[format][1] - kwargs = dict(_UNPACK_FORMATS[format][2]) - func(filename, extract_dir, **kwargs) diff --git a/env/Lib/site-packages/pip/_vendor/distlib/_backport/sysconfig.cfg b/env/Lib/site-packages/pip/_vendor/distlib/_backport/sysconfig.cfg deleted file mode 100644 index 1746bd01c1ad915b728ab58b4668c82b2bd578e1..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/distlib/_backport/sysconfig.cfg +++ /dev/null @@ -1,84 +0,0 @@ -[posix_prefix] -# Configuration directories. Some of these come straight out of the -# configure script. They are for implementing the other variables, not to -# be used directly in [resource_locations]. 
-confdir = /etc -datadir = /usr/share -libdir = /usr/lib -statedir = /var -# User resource directory -local = ~/.local/{distribution.name} - -stdlib = {base}/lib/python{py_version_short} -platstdlib = {platbase}/lib/python{py_version_short} -purelib = {base}/lib/python{py_version_short}/site-packages -platlib = {platbase}/lib/python{py_version_short}/site-packages -include = {base}/include/python{py_version_short}{abiflags} -platinclude = {platbase}/include/python{py_version_short}{abiflags} -data = {base} - -[posix_home] -stdlib = {base}/lib/python -platstdlib = {base}/lib/python -purelib = {base}/lib/python -platlib = {base}/lib/python -include = {base}/include/python -platinclude = {base}/include/python -scripts = {base}/bin -data = {base} - -[nt] -stdlib = {base}/Lib -platstdlib = {base}/Lib -purelib = {base}/Lib/site-packages -platlib = {base}/Lib/site-packages -include = {base}/Include -platinclude = {base}/Include -scripts = {base}/Scripts -data = {base} - -[os2] -stdlib = {base}/Lib -platstdlib = {base}/Lib -purelib = {base}/Lib/site-packages -platlib = {base}/Lib/site-packages -include = {base}/Include -platinclude = {base}/Include -scripts = {base}/Scripts -data = {base} - -[os2_home] -stdlib = {userbase}/lib/python{py_version_short} -platstdlib = {userbase}/lib/python{py_version_short} -purelib = {userbase}/lib/python{py_version_short}/site-packages -platlib = {userbase}/lib/python{py_version_short}/site-packages -include = {userbase}/include/python{py_version_short} -scripts = {userbase}/bin -data = {userbase} - -[nt_user] -stdlib = {userbase}/Python{py_version_nodot} -platstdlib = {userbase}/Python{py_version_nodot} -purelib = {userbase}/Python{py_version_nodot}/site-packages -platlib = {userbase}/Python{py_version_nodot}/site-packages -include = {userbase}/Python{py_version_nodot}/Include -scripts = {userbase}/Scripts -data = {userbase} - -[posix_user] -stdlib = {userbase}/lib/python{py_version_short} -platstdlib = {userbase}/lib/python{py_version_short} -purelib = {userbase}/lib/python{py_version_short}/site-packages -platlib = {userbase}/lib/python{py_version_short}/site-packages -include = {userbase}/include/python{py_version_short} -scripts = {userbase}/bin -data = {userbase} - -[osx_framework_user] -stdlib = {userbase}/lib/python -platstdlib = {userbase}/lib/python -purelib = {userbase}/lib/python/site-packages -platlib = {userbase}/lib/python/site-packages -include = {userbase}/include -scripts = {userbase}/bin -data = {userbase} diff --git a/env/Lib/site-packages/pip/_vendor/distlib/_backport/sysconfig.py b/env/Lib/site-packages/pip/_vendor/distlib/_backport/sysconfig.py deleted file mode 100644 index b470a373c8664325d0d00c367c0a93e926f468fa..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/distlib/_backport/sysconfig.py +++ /dev/null @@ -1,786 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (C) 2012 The Python Software Foundation. -# See LICENSE.txt and CONTRIBUTORS.txt. 
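Each entry in the scheme tables above is a path template whose {…} placeholders are filled from a variable map at lookup time. A rough illustration of that expansion with hypothetical values (str.format is a stand-in here, not the vendored implementation, which leaves unknown placeholders untouched):

    # Expand one sysconfig.cfg-style template against a variable map.
    variables = {"base": "/usr", "py_version_short": "3.9"}
    template = "{base}/lib/python{py_version_short}/site-packages"
    print(template.format(**variables))  # /usr/lib/python3.9/site-packages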
-# -"""Access to Python's configuration information.""" - -import codecs -import os -import re -import sys -from os.path import pardir, realpath -try: - import configparser -except ImportError: - import ConfigParser as configparser - - -__all__ = [ - 'get_config_h_filename', - 'get_config_var', - 'get_config_vars', - 'get_makefile_filename', - 'get_path', - 'get_path_names', - 'get_paths', - 'get_platform', - 'get_python_version', - 'get_scheme_names', - 'parse_config_h', -] - - -def _safe_realpath(path): - try: - return realpath(path) - except OSError: - return path - - -if sys.executable: - _PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable)) -else: - # sys.executable can be empty if argv[0] has been changed and Python is - # unable to retrieve the real program name - _PROJECT_BASE = _safe_realpath(os.getcwd()) - -if os.name == "nt" and "pcbuild" in _PROJECT_BASE[-8:].lower(): - _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir)) -# PC/VS7.1 -if os.name == "nt" and "\\pc\\v" in _PROJECT_BASE[-10:].lower(): - _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir)) -# PC/AMD64 -if os.name == "nt" and "\\pcbuild\\amd64" in _PROJECT_BASE[-14:].lower(): - _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir)) - - -def is_python_build(): - for fn in ("Setup.dist", "Setup.local"): - if os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn)): - return True - return False - -_PYTHON_BUILD = is_python_build() - -_cfg_read = False - -def _ensure_cfg_read(): - global _cfg_read - if not _cfg_read: - from ..resources import finder - backport_package = __name__.rsplit('.', 1)[0] - _finder = finder(backport_package) - _cfgfile = _finder.find('sysconfig.cfg') - assert _cfgfile, 'sysconfig.cfg exists' - with _cfgfile.as_stream() as s: - _SCHEMES.readfp(s) - if _PYTHON_BUILD: - for scheme in ('posix_prefix', 'posix_home'): - _SCHEMES.set(scheme, 'include', '{srcdir}/Include') - _SCHEMES.set(scheme, 'platinclude', '{projectbase}/.') - - _cfg_read = True - - -_SCHEMES = configparser.RawConfigParser() -_VAR_REPL = re.compile(r'\{([^{]*?)\}') - -def _expand_globals(config): - _ensure_cfg_read() - if config.has_section('globals'): - globals = config.items('globals') - else: - globals = tuple() - - sections = config.sections() - for section in sections: - if section == 'globals': - continue - for option, value in globals: - if config.has_option(section, option): - continue - config.set(section, option, value) - config.remove_section('globals') - - # now expanding local variables defined in the cfg file - # - for section in config.sections(): - variables = dict(config.items(section)) - - def _replacer(matchobj): - name = matchobj.group(1) - if name in variables: - return variables[name] - return matchobj.group(0) - - for option, value in config.items(section): - config.set(section, option, _VAR_REPL.sub(_replacer, value)) - -#_expand_globals(_SCHEMES) - -_PY_VERSION = '%s.%s.%s' % sys.version_info[:3] -_PY_VERSION_SHORT = '%s.%s' % sys.version_info[:2] -_PY_VERSION_SHORT_NO_DOT = '%s%s' % sys.version_info[:2] -_PREFIX = os.path.normpath(sys.prefix) -_EXEC_PREFIX = os.path.normpath(sys.exec_prefix) -_CONFIG_VARS = None -_USER_BASE = None - - -def _subst_vars(path, local_vars): - """In the string `path`, replace tokens like {some.thing} with the - corresponding value from the map `local_vars`. - - If there is no corresponding value, leave the token unchanged. 
- """ - def _replacer(matchobj): - name = matchobj.group(1) - if name in local_vars: - return local_vars[name] - elif name in os.environ: - return os.environ[name] - return matchobj.group(0) - return _VAR_REPL.sub(_replacer, path) - - -def _extend_dict(target_dict, other_dict): - target_keys = target_dict.keys() - for key, value in other_dict.items(): - if key in target_keys: - continue - target_dict[key] = value - - -def _expand_vars(scheme, vars): - res = {} - if vars is None: - vars = {} - _extend_dict(vars, get_config_vars()) - - for key, value in _SCHEMES.items(scheme): - if os.name in ('posix', 'nt'): - value = os.path.expanduser(value) - res[key] = os.path.normpath(_subst_vars(value, vars)) - return res - - -def format_value(value, vars): - def _replacer(matchobj): - name = matchobj.group(1) - if name in vars: - return vars[name] - return matchobj.group(0) - return _VAR_REPL.sub(_replacer, value) - - -def _get_default_scheme(): - if os.name == 'posix': - # the default scheme for posix is posix_prefix - return 'posix_prefix' - return os.name - - -def _getuserbase(): - env_base = os.environ.get("PYTHONUSERBASE", None) - - def joinuser(*args): - return os.path.expanduser(os.path.join(*args)) - - # what about 'os2emx', 'riscos' ? - if os.name == "nt": - base = os.environ.get("APPDATA") or "~" - if env_base: - return env_base - else: - return joinuser(base, "Python") - - if sys.platform == "darwin": - framework = get_config_var("PYTHONFRAMEWORK") - if framework: - if env_base: - return env_base - else: - return joinuser("~", "Library", framework, "%d.%d" % - sys.version_info[:2]) - - if env_base: - return env_base - else: - return joinuser("~", ".local") - - -def _parse_makefile(filename, vars=None): - """Parse a Makefile-style file. - - A dictionary containing name/value pairs is returned. If an - optional dictionary is passed in as the second argument, it is - used instead of a new dictionary. - """ - # Regexes needed for parsing Makefile (and similar syntaxes, - # like old-style Setup files). - _variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)") - _findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)") - _findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}") - - if vars is None: - vars = {} - done = {} - notdone = {} - - with codecs.open(filename, encoding='utf-8', errors="surrogateescape") as f: - lines = f.readlines() - - for line in lines: - if line.startswith('#') or line.strip() == '': - continue - m = _variable_rx.match(line) - if m: - n, v = m.group(1, 2) - v = v.strip() - # `$$' is a literal `$' in make - tmpv = v.replace('$$', '') - - if "$" in tmpv: - notdone[n] = v - else: - try: - v = int(v) - except ValueError: - # insert literal `$' - done[n] = v.replace('$$', '$') - else: - done[n] = v - - # do variable interpolation here - variables = list(notdone.keys()) - - # Variables with a 'PY_' prefix in the makefile. These need to - # be made available without that prefix through sysconfig. - # Special care is needed to ensure that variable expansion works, even - # if the expansion uses the name without a prefix. 
- renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS') - - while len(variables) > 0: - for name in tuple(variables): - value = notdone[name] - m = _findvar1_rx.search(value) or _findvar2_rx.search(value) - if m is not None: - n = m.group(1) - found = True - if n in done: - item = str(done[n]) - elif n in notdone: - # get it on a subsequent round - found = False - elif n in os.environ: - # do it like make: fall back to environment - item = os.environ[n] - - elif n in renamed_variables: - if (name.startswith('PY_') and - name[3:] in renamed_variables): - item = "" - - elif 'PY_' + n in notdone: - found = False - - else: - item = str(done['PY_' + n]) - - else: - done[n] = item = "" - - if found: - after = value[m.end():] - value = value[:m.start()] + item + after - if "$" in after: - notdone[name] = value - else: - try: - value = int(value) - except ValueError: - done[name] = value.strip() - else: - done[name] = value - variables.remove(name) - - if (name.startswith('PY_') and - name[3:] in renamed_variables): - - name = name[3:] - if name not in done: - done[name] = value - - else: - # bogus variable reference (e.g. "prefix=$/opt/python"); - # just drop it since we can't deal - done[name] = value - variables.remove(name) - - # strip spurious spaces - for k, v in done.items(): - if isinstance(v, str): - done[k] = v.strip() - - # save the results in the global dictionary - vars.update(done) - return vars - - -def get_makefile_filename(): - """Return the path of the Makefile.""" - if _PYTHON_BUILD: - return os.path.join(_PROJECT_BASE, "Makefile") - if hasattr(sys, 'abiflags'): - config_dir_name = 'config-%s%s' % (_PY_VERSION_SHORT, sys.abiflags) - else: - config_dir_name = 'config' - return os.path.join(get_path('stdlib'), config_dir_name, 'Makefile') - - -def _init_posix(vars): - """Initialize the module as appropriate for POSIX systems.""" - # load the installed Makefile: - makefile = get_makefile_filename() - try: - _parse_makefile(makefile, vars) - except IOError as e: - msg = "invalid Python installation: unable to open %s" % makefile - if hasattr(e, "strerror"): - msg = msg + " (%s)" % e.strerror - raise IOError(msg) - # load the installed pyconfig.h: - config_h = get_config_h_filename() - try: - with open(config_h) as f: - parse_config_h(f, vars) - except IOError as e: - msg = "invalid Python installation: unable to open %s" % config_h - if hasattr(e, "strerror"): - msg = msg + " (%s)" % e.strerror - raise IOError(msg) - # On AIX, there are wrong paths to the linker scripts in the Makefile - # -- these paths are relative to the Python source, but when installed - # the scripts are in another directory. - if _PYTHON_BUILD: - vars['LDSHARED'] = vars['BLDSHARED'] - - -def _init_non_posix(vars): - """Initialize the module as appropriate for NT""" - # set basic install directories - vars['LIBDEST'] = get_path('stdlib') - vars['BINLIBDEST'] = get_path('platstdlib') - vars['INCLUDEPY'] = get_path('include') - vars['SO'] = '.pyd' - vars['EXE'] = '.exe' - vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT - vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable)) - -# -# public APIs -# - - -def parse_config_h(fp, vars=None): - """Parse a config.h-style file. - - A dictionary containing name/value pairs is returned. If an - optional dictionary is passed in as the second argument, it is - used instead of a new dictionary. 
- """ - if vars is None: - vars = {} - define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n") - undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n") - - while True: - line = fp.readline() - if not line: - break - m = define_rx.match(line) - if m: - n, v = m.group(1, 2) - try: - v = int(v) - except ValueError: - pass - vars[n] = v - else: - m = undef_rx.match(line) - if m: - vars[m.group(1)] = 0 - return vars - - -def get_config_h_filename(): - """Return the path of pyconfig.h.""" - if _PYTHON_BUILD: - if os.name == "nt": - inc_dir = os.path.join(_PROJECT_BASE, "PC") - else: - inc_dir = _PROJECT_BASE - else: - inc_dir = get_path('platinclude') - return os.path.join(inc_dir, 'pyconfig.h') - - -def get_scheme_names(): - """Return a tuple containing the schemes names.""" - return tuple(sorted(_SCHEMES.sections())) - - -def get_path_names(): - """Return a tuple containing the paths names.""" - # xxx see if we want a static list - return _SCHEMES.options('posix_prefix') - - -def get_paths(scheme=_get_default_scheme(), vars=None, expand=True): - """Return a mapping containing an install scheme. - - ``scheme`` is the install scheme name. If not provided, it will - return the default scheme for the current platform. - """ - _ensure_cfg_read() - if expand: - return _expand_vars(scheme, vars) - else: - return dict(_SCHEMES.items(scheme)) - - -def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True): - """Return a path corresponding to the scheme. - - ``scheme`` is the install scheme name. - """ - return get_paths(scheme, vars, expand)[name] - - -def get_config_vars(*args): - """With no arguments, return a dictionary of all configuration - variables relevant for the current platform. - - On Unix, this means every variable defined in Python's installed Makefile; - On Windows and Mac OS it's a much smaller set. - - With arguments, return a list of values that result from looking up - each argument in the configuration variable dictionary. - """ - global _CONFIG_VARS - if _CONFIG_VARS is None: - _CONFIG_VARS = {} - # Normalized versions of prefix and exec_prefix are handy to have; - # in fact, these are the standard versions used most places in the - # distutils2 module. - _CONFIG_VARS['prefix'] = _PREFIX - _CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX - _CONFIG_VARS['py_version'] = _PY_VERSION - _CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT - _CONFIG_VARS['py_version_nodot'] = _PY_VERSION[0] + _PY_VERSION[2] - _CONFIG_VARS['base'] = _PREFIX - _CONFIG_VARS['platbase'] = _EXEC_PREFIX - _CONFIG_VARS['projectbase'] = _PROJECT_BASE - try: - _CONFIG_VARS['abiflags'] = sys.abiflags - except AttributeError: - # sys.abiflags may not be defined on all platforms. - _CONFIG_VARS['abiflags'] = '' - - if os.name in ('nt', 'os2'): - _init_non_posix(_CONFIG_VARS) - if os.name == 'posix': - _init_posix(_CONFIG_VARS) - # Setting 'userbase' is done below the call to the - # init function to enable using 'get_config_var' in - # the init-function. - if sys.version >= '2.6': - _CONFIG_VARS['userbase'] = _getuserbase() - - if 'srcdir' not in _CONFIG_VARS: - _CONFIG_VARS['srcdir'] = _PROJECT_BASE - else: - _CONFIG_VARS['srcdir'] = _safe_realpath(_CONFIG_VARS['srcdir']) - - # Convert srcdir into an absolute path if it appears necessary. - # Normally it is relative to the build directory. However, during - # testing, for example, we might be running a non-installed python - # from a different directory. 
- if _PYTHON_BUILD and os.name == "posix": - base = _PROJECT_BASE - try: - cwd = os.getcwd() - except OSError: - cwd = None - if (not os.path.isabs(_CONFIG_VARS['srcdir']) and - base != cwd): - # srcdir is relative and we are not in the same directory - # as the executable. Assume executable is in the build - # directory and make srcdir absolute. - srcdir = os.path.join(base, _CONFIG_VARS['srcdir']) - _CONFIG_VARS['srcdir'] = os.path.normpath(srcdir) - - if sys.platform == 'darwin': - kernel_version = os.uname()[2] # Kernel version (8.4.3) - major_version = int(kernel_version.split('.')[0]) - - if major_version < 8: - # On Mac OS X before 10.4, check if -arch and -isysroot - # are in CFLAGS or LDFLAGS and remove them if they are. - # This is needed when building extensions on a 10.3 system - # using a universal build of python. - for key in ('LDFLAGS', 'BASECFLAGS', - # a number of derived variables. These need to be - # patched up as well. - 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): - flags = _CONFIG_VARS[key] - flags = re.sub(r'-arch\s+\w+\s', ' ', flags) - flags = re.sub('-isysroot [^ \t]*', ' ', flags) - _CONFIG_VARS[key] = flags - else: - # Allow the user to override the architecture flags using - # an environment variable. - # NOTE: This name was introduced by Apple in OSX 10.5 and - # is used by several scripting languages distributed with - # that OS release. - if 'ARCHFLAGS' in os.environ: - arch = os.environ['ARCHFLAGS'] - for key in ('LDFLAGS', 'BASECFLAGS', - # a number of derived variables. These need to be - # patched up as well. - 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): - - flags = _CONFIG_VARS[key] - flags = re.sub(r'-arch\s+\w+\s', ' ', flags) - flags = flags + ' ' + arch - _CONFIG_VARS[key] = flags - - # If we're on OSX 10.5 or later and the user tries to - # compiles an extension using an SDK that is not present - # on the current machine it is better to not use an SDK - # than to fail. - # - # The major usecase for this is users using a Python.org - # binary installer on OSX 10.6: that installer uses - # the 10.4u SDK, but that SDK is not installed by default - # when you install Xcode. - # - CFLAGS = _CONFIG_VARS.get('CFLAGS', '') - m = re.search(r'-isysroot\s+(\S+)', CFLAGS) - if m is not None: - sdk = m.group(1) - if not os.path.exists(sdk): - for key in ('LDFLAGS', 'BASECFLAGS', - # a number of derived variables. These need to be - # patched up as well. - 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): - - flags = _CONFIG_VARS[key] - flags = re.sub(r'-isysroot\s+\S+(\s|$)', ' ', flags) - _CONFIG_VARS[key] = flags - - if args: - vals = [] - for name in args: - vals.append(_CONFIG_VARS.get(name)) - return vals - else: - return _CONFIG_VARS - - -def get_config_var(name): - """Return the value of a single variable using the dictionary returned by - 'get_config_vars()'. - - Equivalent to get_config_vars().get(name) - """ - return get_config_vars().get(name) - - -def get_platform(): - """Return a string that identifies the current platform. - - This is used mainly to distinguish platform-specific build directories and - platform-specific built distributions. Typically includes the OS name - and version and the architecture (as supplied by 'os.uname()'), - although the exact information included depends on the OS; eg. for IRIX - the architecture isn't particularly important (IRIX only runs on SGI - hardware), but for Linux the kernel version isn't particularly - important. - - Examples of returned values: - linux-i586 - linux-alpha (?) 
- solaris-2.6-sun4u - irix-5.3 - irix64-6.2 - - Windows will return one of: - win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc) - win-ia64 (64bit Windows on Itanium) - win32 (all others - specifically, sys.platform is returned) - - For other non-POSIX platforms, currently just returns 'sys.platform'. - """ - if os.name == 'nt': - # sniff sys.version for architecture. - prefix = " bit (" - i = sys.version.find(prefix) - if i == -1: - return sys.platform - j = sys.version.find(")", i) - look = sys.version[i+len(prefix):j].lower() - if look == 'amd64': - return 'win-amd64' - if look == 'itanium': - return 'win-ia64' - return sys.platform - - if os.name != "posix" or not hasattr(os, 'uname'): - # XXX what about the architecture? NT is Intel or Alpha, - # Mac OS is M68k or PPC, etc. - return sys.platform - - # Try to distinguish various flavours of Unix - osname, host, release, version, machine = os.uname() - - # Convert the OS name to lowercase, remove '/' characters - # (to accommodate BSD/OS), and translate spaces (for "Power Macintosh") - osname = osname.lower().replace('/', '') - machine = machine.replace(' ', '_') - machine = machine.replace('/', '-') - - if osname[:5] == "linux": - # At least on Linux/Intel, 'machine' is the processor -- - # i386, etc. - # XXX what about Alpha, SPARC, etc? - return "%s-%s" % (osname, machine) - elif osname[:5] == "sunos": - if release[0] >= "5": # SunOS 5 == Solaris 2 - osname = "solaris" - release = "%d.%s" % (int(release[0]) - 3, release[2:]) - # fall through to standard osname-release-machine representation - elif osname[:4] == "irix": # could be "irix64"! - return "%s-%s" % (osname, release) - elif osname[:3] == "aix": - return "%s-%s.%s" % (osname, version, release) - elif osname[:6] == "cygwin": - osname = "cygwin" - rel_re = re.compile(r'[\d.]+') - m = rel_re.match(release) - if m: - release = m.group() - elif osname[:6] == "darwin": - # - # For our purposes, we'll assume that the system version from - # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set - # to. This makes the compatibility story a bit more sane because the - # machine is going to compile and link as if it were - # MACOSX_DEPLOYMENT_TARGET. - cfgvars = get_config_vars() - macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET') - - if True: - # Always calculate the release of the running machine, - # needed to determine if we can build fat binaries or not. - - macrelease = macver - # Get the system version. Reading this plist is a documented - # way to get the system version (see the documentation for - # the Gestalt Manager) - try: - f = open('/System/Library/CoreServices/SystemVersion.plist') - except IOError: - # We're on a plain darwin box, fall back to the default - # behaviour. - pass - else: - try: - m = re.search(r'<key>ProductUserVisibleVersion</key>\s*' - r'<string>(.*?)</string>', f.read()) - finally: - f.close() - if m is not None: - macrelease = '.'.join(m.group(1).split('.')[:2]) - # else: fall back to the default behaviour - - if not macver: - macver = macrelease - - if macver: - release = macver - osname = "macosx" - - if ((macrelease + '.') >= '10.4.' and - '-arch' in get_config_vars().get('CFLAGS', '').strip()): - # The universal build will build fat binaries, but not on - # systems before 10.4 - # - # Try to detect 4-way universal builds, those have machine-type - # 'universal' instead of 'fat'. 
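The universal-build detection that follows boils down to collecting every -arch flag from CFLAGS and mapping the sorted architecture set to a machine name. A small illustration with a hypothetical flags string:

    import re

    cflags = "-g -arch i386 -arch x86_64 -O3"  # hypothetical CFLAGS value
    archs = tuple(sorted(set(re.findall(r"-arch\s+(\S+)", cflags))))
    print(archs)  # ('i386', 'x86_64'), which the code below maps to machine = 'intel'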
- - machine = 'fat' - cflags = get_config_vars().get('CFLAGS') - - archs = re.findall(r'-arch\s+(\S+)', cflags) - archs = tuple(sorted(set(archs))) - - if len(archs) == 1: - machine = archs[0] - elif archs == ('i386', 'ppc'): - machine = 'fat' - elif archs == ('i386', 'x86_64'): - machine = 'intel' - elif archs == ('i386', 'ppc', 'x86_64'): - machine = 'fat3' - elif archs == ('ppc64', 'x86_64'): - machine = 'fat64' - elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'): - machine = 'universal' - else: - raise ValueError( - "Don't know machine value for archs=%r" % (archs,)) - - elif machine == 'i386': - # On OSX the machine type returned by uname is always the - # 32-bit variant, even if the executable architecture is - # the 64-bit variant - if sys.maxsize >= 2**32: - machine = 'x86_64' - - elif machine in ('PowerPC', 'Power_Macintosh'): - # Pick a sane name for the PPC architecture. - # See 'i386' case - if sys.maxsize >= 2**32: - machine = 'ppc64' - else: - machine = 'ppc' - - return "%s-%s-%s" % (osname, release, machine) - - -def get_python_version(): - return _PY_VERSION_SHORT - - -def _print_dict(title, data): - for index, (key, value) in enumerate(sorted(data.items())): - if index == 0: - print('%s: ' % (title)) - print('\t%s = "%s"' % (key, value)) - - -def _main(): - """Display all information sysconfig detains.""" - print('Platform: "%s"' % get_platform()) - print('Python version: "%s"' % get_python_version()) - print('Current installation scheme: "%s"' % _get_default_scheme()) - print() - _print_dict('Paths', get_paths()) - print() - _print_dict('Variables', get_config_vars()) - - -if __name__ == '__main__': - _main() diff --git a/env/Lib/site-packages/pip/_vendor/distlib/_backport/tarfile.py b/env/Lib/site-packages/pip/_vendor/distlib/_backport/tarfile.py deleted file mode 100644 index d66d8566374be661085213468bce338c6a747702..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/distlib/_backport/tarfile.py +++ /dev/null @@ -1,2607 +0,0 @@ -#------------------------------------------------------------------- -# tarfile.py -#------------------------------------------------------------------- -# Copyright (C) 2002 Lars Gustaebel <lars@gustaebel.de> -# All rights reserved. -# -# Permission is hereby granted, free of charge, to any person -# obtaining a copy of this software and associated documentation -# files (the "Software"), to deal in the Software without -# restriction, including without limitation the rights to use, -# copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the -# Software is furnished to do so, subject to the following -# conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -# OTHER DEALINGS IN THE SOFTWARE. -# -from __future__ import print_function - -"""Read from and write to tar format archives. 
-""" - -__version__ = "$Revision$" - -version = "0.9.0" -__author__ = "Lars Gust\u00e4bel (lars@gustaebel.de)" -__date__ = "$Date: 2011-02-25 17:42:01 +0200 (Fri, 25 Feb 2011) $" -__cvsid__ = "$Id: tarfile.py 88586 2011-02-25 15:42:01Z marc-andre.lemburg $" -__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend." - -#--------- -# Imports -#--------- -import sys -import os -import stat -import errno -import time -import struct -import copy -import re - -try: - import grp, pwd -except ImportError: - grp = pwd = None - -# os.symlink on Windows prior to 6.0 raises NotImplementedError -symlink_exception = (AttributeError, NotImplementedError) -try: - # WindowsError (1314) will be raised if the caller does not hold the - # SeCreateSymbolicLinkPrivilege privilege - symlink_exception += (WindowsError,) -except NameError: - pass - -# from tarfile import * -__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"] - -if sys.version_info[0] < 3: - import __builtin__ as builtins -else: - import builtins - -_open = builtins.open # Since 'open' is TarFile.open - -#--------------------------------------------------------- -# tar constants -#--------------------------------------------------------- -NUL = b"\0" # the null character -BLOCKSIZE = 512 # length of processing blocks -RECORDSIZE = BLOCKSIZE * 20 # length of records -GNU_MAGIC = b"ustar \0" # magic gnu tar string -POSIX_MAGIC = b"ustar\x0000" # magic posix tar string - -LENGTH_NAME = 100 # maximum length of a filename -LENGTH_LINK = 100 # maximum length of a linkname -LENGTH_PREFIX = 155 # maximum length of the prefix field - -REGTYPE = b"0" # regular file -AREGTYPE = b"\0" # regular file -LNKTYPE = b"1" # link (inside tarfile) -SYMTYPE = b"2" # symbolic link -CHRTYPE = b"3" # character special device -BLKTYPE = b"4" # block special device -DIRTYPE = b"5" # directory -FIFOTYPE = b"6" # fifo special device -CONTTYPE = b"7" # contiguous file - -GNUTYPE_LONGNAME = b"L" # GNU tar longname -GNUTYPE_LONGLINK = b"K" # GNU tar longlink -GNUTYPE_SPARSE = b"S" # GNU tar sparse file - -XHDTYPE = b"x" # POSIX.1-2001 extended header -XGLTYPE = b"g" # POSIX.1-2001 global header -SOLARIS_XHDTYPE = b"X" # Solaris extended header - -USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format -GNU_FORMAT = 1 # GNU tar format -PAX_FORMAT = 2 # POSIX.1-2001 (pax) format -DEFAULT_FORMAT = GNU_FORMAT - -#--------------------------------------------------------- -# tarfile constants -#--------------------------------------------------------- -# File types that tarfile supports: -SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE, - SYMTYPE, DIRTYPE, FIFOTYPE, - CONTTYPE, CHRTYPE, BLKTYPE, - GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, - GNUTYPE_SPARSE) - -# File types that will be treated as a regular file. -REGULAR_TYPES = (REGTYPE, AREGTYPE, - CONTTYPE, GNUTYPE_SPARSE) - -# File types that are part of the GNU tar format. -GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, - GNUTYPE_SPARSE) - -# Fields from a pax header that override a TarInfo attribute. -PAX_FIELDS = ("path", "linkpath", "size", "mtime", - "uid", "gid", "uname", "gname") - -# Fields from a pax header that are affected by hdrcharset. -PAX_NAME_FIELDS = set(("path", "linkpath", "uname", "gname")) - -# Fields in a pax header that are numbers, all other fields -# are treated as strings. 
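Everything in this format is laid out in 512-byte blocks, grouped into 20-block records, so sizes are always rounded up to a block boundary. A quick sketch of the arithmetic these constants imply (the helper name padded is illustrative):

    BLOCKSIZE = 512
    RECORDSIZE = BLOCKSIZE * 20        # 10240 bytes per record

    def padded(n):
        # Round a byte count up to the next 512-byte block boundary.
        blocks, remainder = divmod(n, BLOCKSIZE)
        if remainder:
            blocks += 1
        return blocks * BLOCKSIZE

    print(padded(834))   # 1024, matching the _block() docstring later in this file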
-PAX_NUMBER_FIELDS = { - "atime": float, - "ctime": float, - "mtime": float, - "uid": int, - "gid": int, - "size": int -} - -#--------------------------------------------------------- -# Bits used in the mode field, values in octal. -#--------------------------------------------------------- -S_IFLNK = 0o120000 # symbolic link -S_IFREG = 0o100000 # regular file -S_IFBLK = 0o060000 # block device -S_IFDIR = 0o040000 # directory -S_IFCHR = 0o020000 # character device -S_IFIFO = 0o010000 # fifo - -TSUID = 0o4000 # set UID on execution -TSGID = 0o2000 # set GID on execution -TSVTX = 0o1000 # reserved - -TUREAD = 0o400 # read by owner -TUWRITE = 0o200 # write by owner -TUEXEC = 0o100 # execute/search by owner -TGREAD = 0o040 # read by group -TGWRITE = 0o020 # write by group -TGEXEC = 0o010 # execute/search by group -TOREAD = 0o004 # read by other -TOWRITE = 0o002 # write by other -TOEXEC = 0o001 # execute/search by other - -#--------------------------------------------------------- -# initialization -#--------------------------------------------------------- -if os.name in ("nt", "ce"): - ENCODING = "utf-8" -else: - ENCODING = sys.getfilesystemencoding() - -#--------------------------------------------------------- -# Some useful functions -#--------------------------------------------------------- - -def stn(s, length, encoding, errors): - """Convert a string to a null-terminated bytes object. - """ - s = s.encode(encoding, errors) - return s[:length] + (length - len(s)) * NUL - -def nts(s, encoding, errors): - """Convert a null-terminated bytes object to a string. - """ - p = s.find(b"\0") - if p != -1: - s = s[:p] - return s.decode(encoding, errors) - -def nti(s): - """Convert a number field to a python number. - """ - # There are two possible encodings for a number field, see - # itn() below. - if s[0] != chr(0o200): - try: - n = int(nts(s, "ascii", "strict") or "0", 8) - except ValueError: - raise InvalidHeaderError("invalid header") - else: - n = 0 - for i in range(len(s) - 1): - n <<= 8 - n += ord(s[i + 1]) - return n - -def itn(n, digits=8, format=DEFAULT_FORMAT): - """Convert a python number to a number field. - """ - # POSIX 1003.1-1988 requires numbers to be encoded as a string of - # octal digits followed by a null-byte, this allows values up to - # (8**(digits-1))-1. GNU tar allows storing numbers greater than - # that if necessary. A leading 0o200 byte indicates this particular - # encoding, the following digits-1 bytes are a big-endian - # representation. This allows values up to (256**(digits-1))-1. - if 0 <= n < 8 ** (digits - 1): - s = ("%0*o" % (digits - 1, n)).encode("ascii") + NUL - else: - if format != GNU_FORMAT or n >= 256 ** (digits - 1): - raise ValueError("overflow in number field") - - if n < 0: - # XXX We mimic GNU tar's behaviour with negative numbers, - # this could raise OverflowError. - n = struct.unpack("L", struct.pack("l", n))[0] - - s = bytearray() - for i in range(digits - 1): - s.insert(0, n & 0o377) - n >>= 8 - s.insert(0, 0o200) - return s - -def calc_chksums(buf): - """Calculate the checksum for a member's header by summing up all - characters except for the chksum field which is treated as if - it was filled with spaces. According to the GNU tar sources, - some tars (Sun and NeXT) calculate chksum with signed char, - which will be different if there are chars in the buffer with - the high bit set. So we calculate two checksums, unsigned and - signed. 
- """ - unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512])) - signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512])) - return unsigned_chksum, signed_chksum - -def copyfileobj(src, dst, length=None): - """Copy length bytes from fileobj src to fileobj dst. - If length is None, copy the entire content. - """ - if length == 0: - return - if length is None: - while True: - buf = src.read(16*1024) - if not buf: - break - dst.write(buf) - return - - BUFSIZE = 16 * 1024 - blocks, remainder = divmod(length, BUFSIZE) - for b in range(blocks): - buf = src.read(BUFSIZE) - if len(buf) < BUFSIZE: - raise IOError("end of file reached") - dst.write(buf) - - if remainder != 0: - buf = src.read(remainder) - if len(buf) < remainder: - raise IOError("end of file reached") - dst.write(buf) - return - -filemode_table = ( - ((S_IFLNK, "l"), - (S_IFREG, "-"), - (S_IFBLK, "b"), - (S_IFDIR, "d"), - (S_IFCHR, "c"), - (S_IFIFO, "p")), - - ((TUREAD, "r"),), - ((TUWRITE, "w"),), - ((TUEXEC|TSUID, "s"), - (TSUID, "S"), - (TUEXEC, "x")), - - ((TGREAD, "r"),), - ((TGWRITE, "w"),), - ((TGEXEC|TSGID, "s"), - (TSGID, "S"), - (TGEXEC, "x")), - - ((TOREAD, "r"),), - ((TOWRITE, "w"),), - ((TOEXEC|TSVTX, "t"), - (TSVTX, "T"), - (TOEXEC, "x")) -) - -def filemode(mode): - """Convert a file's mode to a string of the form - -rwxrwxrwx. - Used by TarFile.list() - """ - perm = [] - for table in filemode_table: - for bit, char in table: - if mode & bit == bit: - perm.append(char) - break - else: - perm.append("-") - return "".join(perm) - -class TarError(Exception): - """Base exception.""" - pass -class ExtractError(TarError): - """General exception for extract errors.""" - pass -class ReadError(TarError): - """Exception for unreadable tar archives.""" - pass -class CompressionError(TarError): - """Exception for unavailable compression methods.""" - pass -class StreamError(TarError): - """Exception for unsupported operations on stream-like TarFiles.""" - pass -class HeaderError(TarError): - """Base exception for header errors.""" - pass -class EmptyHeaderError(HeaderError): - """Exception for empty headers.""" - pass -class TruncatedHeaderError(HeaderError): - """Exception for truncated headers.""" - pass -class EOFHeaderError(HeaderError): - """Exception for end of file headers.""" - pass -class InvalidHeaderError(HeaderError): - """Exception for invalid headers.""" - pass -class SubsequentHeaderError(HeaderError): - """Exception for missing and invalid extended headers.""" - pass - -#--------------------------- -# internal stream interface -#--------------------------- -class _LowLevelFile(object): - """Low-level file object. Supports reading and writing. - It is used instead of a regular file object for streaming - access. - """ - - def __init__(self, name, mode): - mode = { - "r": os.O_RDONLY, - "w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC, - }[mode] - if hasattr(os, "O_BINARY"): - mode |= os.O_BINARY - self.fd = os.open(name, mode, 0o666) - - def close(self): - os.close(self.fd) - - def read(self, size): - return os.read(self.fd, size) - - def write(self, s): - os.write(self.fd, s) - -class _Stream(object): - """Class that serves as an adapter between TarFile and - a stream-like object. The stream-like object only - needs to have a read() or write() method and is accessed - blockwise. Use of gzip or bzip2 compression is possible. - A stream-like object could be for example: sys.stdin, - sys.stdout, a socket, a tape device etc. 
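calc_chksums() above sums the entire 512-byte header while treating the 8-byte chksum field (offsets 148..156) as spaces, which is where the constant 256 comes from (eight ASCII spaces, 8 * 32). A standalone check on a synthetic block (the data is made up):

    import struct

    # 512-byte block: some ASCII, one byte with the high bit set, rest NULs.
    buf = bytes([0x41] * 10 + [0xFF]) + bytes(501)

    unsigned = 256 + sum(struct.unpack("148B", buf[:148]) +
                         struct.unpack("356B", buf[156:512]))
    signed = 256 + sum(struct.unpack("148b", buf[:148]) +
                       struct.unpack("356b", buf[156:512]))

    # 0xFF counts as 255 unsigned but -1 signed, hence a gap of exactly 256.
    print(unsigned, signed, unsigned - signed)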
- - _Stream is intended to be used only internally. - """ - - def __init__(self, name, mode, comptype, fileobj, bufsize): - """Construct a _Stream object. - """ - self._extfileobj = True - if fileobj is None: - fileobj = _LowLevelFile(name, mode) - self._extfileobj = False - - if comptype == '*': - # Enable transparent compression detection for the - # stream interface - fileobj = _StreamProxy(fileobj) - comptype = fileobj.getcomptype() - - self.name = name or "" - self.mode = mode - self.comptype = comptype - self.fileobj = fileobj - self.bufsize = bufsize - self.buf = b"" - self.pos = 0 - self.closed = False - - try: - if comptype == "gz": - try: - import zlib - except ImportError: - raise CompressionError("zlib module is not available") - self.zlib = zlib - self.crc = zlib.crc32(b"") - if mode == "r": - self._init_read_gz() - else: - self._init_write_gz() - - if comptype == "bz2": - try: - import bz2 - except ImportError: - raise CompressionError("bz2 module is not available") - if mode == "r": - self.dbuf = b"" - self.cmp = bz2.BZ2Decompressor() - else: - self.cmp = bz2.BZ2Compressor() - except: - if not self._extfileobj: - self.fileobj.close() - self.closed = True - raise - - def __del__(self): - if hasattr(self, "closed") and not self.closed: - self.close() - - def _init_write_gz(self): - """Initialize for writing with gzip compression. - """ - self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED, - -self.zlib.MAX_WBITS, - self.zlib.DEF_MEM_LEVEL, - 0) - timestamp = struct.pack("<L", int(time.time())) - self.__write(b"\037\213\010\010" + timestamp + b"\002\377") - if self.name.endswith(".gz"): - self.name = self.name[:-3] - # RFC1952 says we must use ISO-8859-1 for the FNAME field. - self.__write(self.name.encode("iso-8859-1", "replace") + NUL) - - def write(self, s): - """Write string s to the stream. - """ - if self.comptype == "gz": - self.crc = self.zlib.crc32(s, self.crc) - self.pos += len(s) - if self.comptype != "tar": - s = self.cmp.compress(s) - self.__write(s) - - def __write(self, s): - """Write string s to the stream if a whole new block - is ready to be written. - """ - self.buf += s - while len(self.buf) > self.bufsize: - self.fileobj.write(self.buf[:self.bufsize]) - self.buf = self.buf[self.bufsize:] - - def close(self): - """Close the _Stream object. No operation should be - done on it afterwards. - """ - if self.closed: - return - - if self.mode == "w" and self.comptype != "tar": - self.buf += self.cmp.flush() - - if self.mode == "w" and self.buf: - self.fileobj.write(self.buf) - self.buf = b"" - if self.comptype == "gz": - # The native zlib crc is an unsigned 32-bit integer, but - # the Python wrapper implicitly casts that to a signed C - # long. So, on a 32-bit box self.crc may "look negative", - # while the same crc on a 64-bit box may "look positive". - # To avoid irksome warnings from the `struct` module, force - # it to look positive on all boxes. - self.fileobj.write(struct.pack("<L", self.crc & 0xffffffff)) - self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF)) - - if not self._extfileobj: - self.fileobj.close() - - self.closed = True - - def _init_read_gz(self): - """Initialize for reading a gzip compressed fileobj. 
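The gzip path of _Stream above builds the RFC 1952 framing by hand: a ten-byte header (magic, deflate method, FNAME flag, timestamp, XFL, OS), a NUL-terminated name, a raw-deflate body, then CRC32 and length written on close. A minimal standalone reproduction of that framing (payload and name are made up; this is a sketch, not the class itself):

    import gzip, io, struct, time, zlib

    payload = b"hello tar stream"

    cmp = zlib.compressobj(9, zlib.DEFLATED, -zlib.MAX_WBITS,
                           zlib.DEF_MEM_LEVEL, 0)
    header = (b"\037\213\010\010"                      # magic, deflate, FNAME
              + struct.pack("<L", int(time.time()))    # mtime
              + b"\002\377")                           # XFL, OS = unknown
    header += b"demo" + b"\0"                          # NUL-terminated FNAME
    body = cmp.compress(payload) + cmp.flush()
    trailer = (struct.pack("<L", zlib.crc32(payload) & 0xffffffff)
               + struct.pack("<L", len(payload) & 0xffffffff))

    # The stdlib gzip reader accepts the hand-built member.
    assert gzip.GzipFile(fileobj=io.BytesIO(header + body + trailer)).read() == payload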
- """ - self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS) - self.dbuf = b"" - - # taken from gzip.GzipFile with some alterations - if self.__read(2) != b"\037\213": - raise ReadError("not a gzip file") - if self.__read(1) != b"\010": - raise CompressionError("unsupported compression method") - - flag = ord(self.__read(1)) - self.__read(6) - - if flag & 4: - xlen = ord(self.__read(1)) + 256 * ord(self.__read(1)) - self.read(xlen) - if flag & 8: - while True: - s = self.__read(1) - if not s or s == NUL: - break - if flag & 16: - while True: - s = self.__read(1) - if not s or s == NUL: - break - if flag & 2: - self.__read(2) - - def tell(self): - """Return the stream's file pointer position. - """ - return self.pos - - def seek(self, pos=0): - """Set the stream's file pointer to pos. Negative seeking - is forbidden. - """ - if pos - self.pos >= 0: - blocks, remainder = divmod(pos - self.pos, self.bufsize) - for i in range(blocks): - self.read(self.bufsize) - self.read(remainder) - else: - raise StreamError("seeking backwards is not allowed") - return self.pos - - def read(self, size=None): - """Return the next size number of bytes from the stream. - If size is not defined, return all bytes of the stream - up to EOF. - """ - if size is None: - t = [] - while True: - buf = self._read(self.bufsize) - if not buf: - break - t.append(buf) - buf = "".join(t) - else: - buf = self._read(size) - self.pos += len(buf) - return buf - - def _read(self, size): - """Return size bytes from the stream. - """ - if self.comptype == "tar": - return self.__read(size) - - c = len(self.dbuf) - while c < size: - buf = self.__read(self.bufsize) - if not buf: - break - try: - buf = self.cmp.decompress(buf) - except IOError: - raise ReadError("invalid compressed data") - self.dbuf += buf - c += len(buf) - buf = self.dbuf[:size] - self.dbuf = self.dbuf[size:] - return buf - - def __read(self, size): - """Return size bytes from stream. If internal buffer is empty, - read another block from the stream. - """ - c = len(self.buf) - while c < size: - buf = self.fileobj.read(self.bufsize) - if not buf: - break - self.buf += buf - c += len(buf) - buf = self.buf[:size] - self.buf = self.buf[size:] - return buf -# class _Stream - -class _StreamProxy(object): - """Small proxy class that enables transparent compression - detection for the Stream interface (mode 'r|*'). - """ - - def __init__(self, fileobj): - self.fileobj = fileobj - self.buf = self.fileobj.read(BLOCKSIZE) - - def read(self, size): - self.read = self.fileobj.read - return self.buf - - def getcomptype(self): - if self.buf.startswith(b"\037\213\010"): - return "gz" - if self.buf.startswith(b"BZh91"): - return "bz2" - return "tar" - - def close(self): - self.fileobj.close() -# class StreamProxy - -class _BZ2Proxy(object): - """Small proxy class that enables external file object - support for "r:bz2" and "w:bz2" modes. This is actually - a workaround for a limitation in bz2 module's BZ2File - class which (unlike gzip.GzipFile) has no support for - a file object argument. 
- """ - - blocksize = 16 * 1024 - - def __init__(self, fileobj, mode): - self.fileobj = fileobj - self.mode = mode - self.name = getattr(self.fileobj, "name", None) - self.init() - - def init(self): - import bz2 - self.pos = 0 - if self.mode == "r": - self.bz2obj = bz2.BZ2Decompressor() - self.fileobj.seek(0) - self.buf = b"" - else: - self.bz2obj = bz2.BZ2Compressor() - - def read(self, size): - x = len(self.buf) - while x < size: - raw = self.fileobj.read(self.blocksize) - if not raw: - break - data = self.bz2obj.decompress(raw) - self.buf += data - x += len(data) - - buf = self.buf[:size] - self.buf = self.buf[size:] - self.pos += len(buf) - return buf - - def seek(self, pos): - if pos < self.pos: - self.init() - self.read(pos - self.pos) - - def tell(self): - return self.pos - - def write(self, data): - self.pos += len(data) - raw = self.bz2obj.compress(data) - self.fileobj.write(raw) - - def close(self): - if self.mode == "w": - raw = self.bz2obj.flush() - self.fileobj.write(raw) -# class _BZ2Proxy - -#------------------------ -# Extraction file object -#------------------------ -class _FileInFile(object): - """A thin wrapper around an existing file object that - provides a part of its data as an individual file - object. - """ - - def __init__(self, fileobj, offset, size, blockinfo=None): - self.fileobj = fileobj - self.offset = offset - self.size = size - self.position = 0 - - if blockinfo is None: - blockinfo = [(0, size)] - - # Construct a map with data and zero blocks. - self.map_index = 0 - self.map = [] - lastpos = 0 - realpos = self.offset - for offset, size in blockinfo: - if offset > lastpos: - self.map.append((False, lastpos, offset, None)) - self.map.append((True, offset, offset + size, realpos)) - realpos += size - lastpos = offset + size - if lastpos < self.size: - self.map.append((False, lastpos, self.size, None)) - - def seekable(self): - if not hasattr(self.fileobj, "seekable"): - # XXX gzip.GzipFile and bz2.BZ2File - return True - return self.fileobj.seekable() - - def tell(self): - """Return the current file position. - """ - return self.position - - def seek(self, position): - """Seek to a position in the file. - """ - self.position = position - - def read(self, size=None): - """Read data from the file. - """ - if size is None: - size = self.size - self.position - else: - size = min(size, self.size - self.position) - - buf = b"" - while size > 0: - while True: - data, start, stop, offset = self.map[self.map_index] - if start <= self.position < stop: - break - else: - self.map_index += 1 - if self.map_index == len(self.map): - self.map_index = 0 - length = min(size, stop - self.position) - if data: - self.fileobj.seek(offset + (self.position - start)) - buf += self.fileobj.read(length) - else: - buf += NUL * length - size -= length - self.position += length - return buf -#class _FileInFile - - -class ExFileObject(object): - """File-like object for reading an archive member. - Is returned by TarFile.extractfile(). - """ - blocksize = 1024 - - def __init__(self, tarfile, tarinfo): - self.fileobj = _FileInFile(tarfile.fileobj, - tarinfo.offset_data, - tarinfo.size, - tarinfo.sparse) - self.name = tarinfo.name - self.mode = "r" - self.closed = False - self.size = tarinfo.size - - self.position = 0 - self.buffer = b"" - - def readable(self): - return True - - def writable(self): - return False - - def seekable(self): - return self.fileobj.seekable() - - def read(self, size=None): - """Read at most size bytes from the file. 
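_FileInFile above flattens a sparse member's blockinfo into (is_data, start, stop, realpos) entries so that holes read back as NULs without taking up archive space. The map construction in isolation (the helper name build_map is illustrative):

    def build_map(blockinfo, size, base_offset=0):
        # blockinfo holds (offset, length) pairs of real data in the member.
        m, lastpos, realpos = [], 0, base_offset
        for offset, length in blockinfo:
            if offset > lastpos:
                m.append((False, lastpos, offset, None))         # hole
            m.append((True, offset, offset + length, realpos))   # data block
            realpos += length
            lastpos = offset + length
        if lastpos < size:
            m.append((False, lastpos, size, None))               # trailing hole
        return m

    # A 120-byte member with data at [0, 10) and [100, 105).
    for entry in build_map([(0, 10), (100, 5)], 120):
        print(entry)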
If size is not - present or None, read all data until EOF is reached. - """ - if self.closed: - raise ValueError("I/O operation on closed file") - - buf = b"" - if self.buffer: - if size is None: - buf = self.buffer - self.buffer = b"" - else: - buf = self.buffer[:size] - self.buffer = self.buffer[size:] - - if size is None: - buf += self.fileobj.read() - else: - buf += self.fileobj.read(size - len(buf)) - - self.position += len(buf) - return buf - - # XXX TextIOWrapper uses the read1() method. - read1 = read - - def readline(self, size=-1): - """Read one entire line from the file. If size is present - and non-negative, return a string with at most that - size, which may be an incomplete line. - """ - if self.closed: - raise ValueError("I/O operation on closed file") - - pos = self.buffer.find(b"\n") + 1 - if pos == 0: - # no newline found. - while True: - buf = self.fileobj.read(self.blocksize) - self.buffer += buf - if not buf or b"\n" in buf: - pos = self.buffer.find(b"\n") + 1 - if pos == 0: - # no newline found. - pos = len(self.buffer) - break - - if size != -1: - pos = min(size, pos) - - buf = self.buffer[:pos] - self.buffer = self.buffer[pos:] - self.position += len(buf) - return buf - - def readlines(self): - """Return a list with all remaining lines. - """ - result = [] - while True: - line = self.readline() - if not line: break - result.append(line) - return result - - def tell(self): - """Return the current file position. - """ - if self.closed: - raise ValueError("I/O operation on closed file") - - return self.position - - def seek(self, pos, whence=os.SEEK_SET): - """Seek to a position in the file. - """ - if self.closed: - raise ValueError("I/O operation on closed file") - - if whence == os.SEEK_SET: - self.position = min(max(pos, 0), self.size) - elif whence == os.SEEK_CUR: - if pos < 0: - self.position = max(self.position + pos, 0) - else: - self.position = min(self.position + pos, self.size) - elif whence == os.SEEK_END: - self.position = max(min(self.size + pos, self.size), 0) - else: - raise ValueError("Invalid argument") - - self.buffer = b"" - self.fileobj.seek(self.position) - - def close(self): - """Close the file object. - """ - self.closed = True - - def __iter__(self): - """Get an iterator over the file's lines. - """ - while True: - line = self.readline() - if not line: - break - yield line -#class ExFileObject - -#------------------ -# Exported Classes -#------------------ -class TarInfo(object): - """Informational class which holds the details about an - archive member given by a tar header block. - TarInfo objects are returned by TarFile.getmember(), - TarFile.getmembers() and TarFile.gettarinfo() and are - usually created internally. - """ - - __slots__ = ("name", "mode", "uid", "gid", "size", "mtime", - "chksum", "type", "linkname", "uname", "gname", - "devmajor", "devminor", - "offset", "offset_data", "pax_headers", "sparse", - "tarfile", "_sparse_structs", "_link_target") - - def __init__(self, name=""): - """Construct a TarInfo object. name is the optional name - of the member. 
- """ - self.name = name # member name - self.mode = 0o644 # file permissions - self.uid = 0 # user id - self.gid = 0 # group id - self.size = 0 # file size - self.mtime = 0 # modification time - self.chksum = 0 # header checksum - self.type = REGTYPE # member type - self.linkname = "" # link name - self.uname = "" # user name - self.gname = "" # group name - self.devmajor = 0 # device major number - self.devminor = 0 # device minor number - - self.offset = 0 # the tar header starts here - self.offset_data = 0 # the file's data starts here - - self.sparse = None # sparse member information - self.pax_headers = {} # pax header information - - # In pax headers the "name" and "linkname" field are called - # "path" and "linkpath". - def _getpath(self): - return self.name - def _setpath(self, name): - self.name = name - path = property(_getpath, _setpath) - - def _getlinkpath(self): - return self.linkname - def _setlinkpath(self, linkname): - self.linkname = linkname - linkpath = property(_getlinkpath, _setlinkpath) - - def __repr__(self): - return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self)) - - def get_info(self): - """Return the TarInfo's attributes as a dictionary. - """ - info = { - "name": self.name, - "mode": self.mode & 0o7777, - "uid": self.uid, - "gid": self.gid, - "size": self.size, - "mtime": self.mtime, - "chksum": self.chksum, - "type": self.type, - "linkname": self.linkname, - "uname": self.uname, - "gname": self.gname, - "devmajor": self.devmajor, - "devminor": self.devminor - } - - if info["type"] == DIRTYPE and not info["name"].endswith("/"): - info["name"] += "/" - - return info - - def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"): - """Return a tar header as a string of 512 byte blocks. - """ - info = self.get_info() - - if format == USTAR_FORMAT: - return self.create_ustar_header(info, encoding, errors) - elif format == GNU_FORMAT: - return self.create_gnu_header(info, encoding, errors) - elif format == PAX_FORMAT: - return self.create_pax_header(info, encoding) - else: - raise ValueError("invalid format") - - def create_ustar_header(self, info, encoding, errors): - """Return the object as a ustar header block. - """ - info["magic"] = POSIX_MAGIC - - if len(info["linkname"]) > LENGTH_LINK: - raise ValueError("linkname is too long") - - if len(info["name"]) > LENGTH_NAME: - info["prefix"], info["name"] = self._posix_split_name(info["name"]) - - return self._create_header(info, USTAR_FORMAT, encoding, errors) - - def create_gnu_header(self, info, encoding, errors): - """Return the object as a GNU header block sequence. - """ - info["magic"] = GNU_MAGIC - - buf = b"" - if len(info["linkname"]) > LENGTH_LINK: - buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors) - - if len(info["name"]) > LENGTH_NAME: - buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors) - - return buf + self._create_header(info, GNU_FORMAT, encoding, errors) - - def create_pax_header(self, info, encoding): - """Return the object as a ustar header block. If it cannot be - represented this way, prepend a pax extended header sequence - with supplement information. - """ - info["magic"] = POSIX_MAGIC - pax_headers = self.pax_headers.copy() - - # Test string fields for values that exceed the field length or cannot - # be represented in ASCII encoding. 
- for name, hname, length in ( - ("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK), - ("uname", "uname", 32), ("gname", "gname", 32)): - - if hname in pax_headers: - # The pax header has priority. - continue - - # Try to encode the string as ASCII. - try: - info[name].encode("ascii", "strict") - except UnicodeEncodeError: - pax_headers[hname] = info[name] - continue - - if len(info[name]) > length: - pax_headers[hname] = info[name] - - # Test number fields for values that exceed the field limit or values - # that like to be stored as float. - for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)): - if name in pax_headers: - # The pax header has priority. Avoid overflow. - info[name] = 0 - continue - - val = info[name] - if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float): - pax_headers[name] = str(val) - info[name] = 0 - - # Create a pax extended header if necessary. - if pax_headers: - buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding) - else: - buf = b"" - - return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace") - - @classmethod - def create_pax_global_header(cls, pax_headers): - """Return the object as a pax global header block sequence. - """ - return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf8") - - def _posix_split_name(self, name): - """Split a name longer than 100 chars into a prefix - and a name part. - """ - prefix = name[:LENGTH_PREFIX + 1] - while prefix and prefix[-1] != "/": - prefix = prefix[:-1] - - name = name[len(prefix):] - prefix = prefix[:-1] - - if not prefix or len(name) > LENGTH_NAME: - raise ValueError("name is too long") - return prefix, name - - @staticmethod - def _create_header(info, format, encoding, errors): - """Return a header block. info is a dictionary with file - information, format must be one of the *_FORMAT constants. - """ - parts = [ - stn(info.get("name", ""), 100, encoding, errors), - itn(info.get("mode", 0) & 0o7777, 8, format), - itn(info.get("uid", 0), 8, format), - itn(info.get("gid", 0), 8, format), - itn(info.get("size", 0), 12, format), - itn(info.get("mtime", 0), 12, format), - b" ", # checksum field - info.get("type", REGTYPE), - stn(info.get("linkname", ""), 100, encoding, errors), - info.get("magic", POSIX_MAGIC), - stn(info.get("uname", ""), 32, encoding, errors), - stn(info.get("gname", ""), 32, encoding, errors), - itn(info.get("devmajor", 0), 8, format), - itn(info.get("devminor", 0), 8, format), - stn(info.get("prefix", ""), 155, encoding, errors) - ] - - buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts)) - chksum = calc_chksums(buf[-BLOCKSIZE:])[0] - buf = buf[:-364] + ("%06o\0" % chksum).encode("ascii") + buf[-357:] - return buf - - @staticmethod - def _create_payload(payload): - """Return the string payload filled with zero bytes - up to the next 512 byte border. - """ - blocks, remainder = divmod(len(payload), BLOCKSIZE) - if remainder > 0: - payload += (BLOCKSIZE - remainder) * NUL - return payload - - @classmethod - def _create_gnu_long_header(cls, name, type, encoding, errors): - """Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence - for name. - """ - name = name.encode(encoding, errors) + NUL - - info = {} - info["name"] = "././@LongLink" - info["type"] = type - info["size"] = len(name) - info["magic"] = GNU_MAGIC - - # create extended header + name blocks. 
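_create_gnu_long_header() stores an over-long name as a separate pseudo-member named ././@LongLink whose payload is the name itself. The stdlib tarfile, from which this backport descends, still does the same, which makes the behaviour easy to observe (the file name below is arbitrary):

    import io, tarfile

    name = "d/" * 60 + "file.txt"          # 128 chars, over the 100-char field
    buf = io.BytesIO()
    with tarfile.open(fileobj=buf, mode="w", format=tarfile.GNU_FORMAT) as tf:
        tf.addfile(tarfile.TarInfo(name))  # size 0, header-only member

    # The synthetic longname header is written before the real one.
    assert b"././@LongLink" in buf.getvalue()[:512]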
- return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \ - cls._create_payload(name) - - @classmethod - def _create_pax_generic_header(cls, pax_headers, type, encoding): - """Return a POSIX.1-2008 extended or global header sequence - that contains a list of keyword, value pairs. The values - must be strings. - """ - # Check if one of the fields contains surrogate characters and thereby - # forces hdrcharset=BINARY, see _proc_pax() for more information. - binary = False - for keyword, value in pax_headers.items(): - try: - value.encode("utf8", "strict") - except UnicodeEncodeError: - binary = True - break - - records = b"" - if binary: - # Put the hdrcharset field at the beginning of the header. - records += b"21 hdrcharset=BINARY\n" - - for keyword, value in pax_headers.items(): - keyword = keyword.encode("utf8") - if binary: - # Try to restore the original byte representation of `value'. - # Needless to say, that the encoding must match the string. - value = value.encode(encoding, "surrogateescape") - else: - value = value.encode("utf8") - - l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n' - n = p = 0 - while True: - n = l + len(str(p)) - if n == p: - break - p = n - records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n" - - # We use a hardcoded "././@PaxHeader" name like star does - # instead of the one that POSIX recommends. - info = {} - info["name"] = "././@PaxHeader" - info["type"] = type - info["size"] = len(records) - info["magic"] = POSIX_MAGIC - - # Create pax header + record blocks. - return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \ - cls._create_payload(records) - - @classmethod - def frombuf(cls, buf, encoding, errors): - """Construct a TarInfo object from a 512 byte bytes object. - """ - if len(buf) == 0: - raise EmptyHeaderError("empty header") - if len(buf) != BLOCKSIZE: - raise TruncatedHeaderError("truncated header") - if buf.count(NUL) == BLOCKSIZE: - raise EOFHeaderError("end of file header") - - chksum = nti(buf[148:156]) - if chksum not in calc_chksums(buf): - raise InvalidHeaderError("bad checksum") - - obj = cls() - obj.name = nts(buf[0:100], encoding, errors) - obj.mode = nti(buf[100:108]) - obj.uid = nti(buf[108:116]) - obj.gid = nti(buf[116:124]) - obj.size = nti(buf[124:136]) - obj.mtime = nti(buf[136:148]) - obj.chksum = chksum - obj.type = buf[156:157] - obj.linkname = nts(buf[157:257], encoding, errors) - obj.uname = nts(buf[265:297], encoding, errors) - obj.gname = nts(buf[297:329], encoding, errors) - obj.devmajor = nti(buf[329:337]) - obj.devminor = nti(buf[337:345]) - prefix = nts(buf[345:500], encoding, errors) - - # Old V7 tar format represents a directory as a regular - # file with a trailing slash. - if obj.type == AREGTYPE and obj.name.endswith("/"): - obj.type = DIRTYPE - - # The old GNU sparse format occupies some of the unused - # space in the buffer for up to 4 sparse structures. - # Save the them for later processing in _proc_sparse(). - if obj.type == GNUTYPE_SPARSE: - pos = 386 - structs = [] - for i in range(4): - try: - offset = nti(buf[pos:pos + 12]) - numbytes = nti(buf[pos + 12:pos + 24]) - except ValueError: - break - structs.append((offset, numbytes)) - pos += 24 - isextended = bool(buf[482]) - origsize = nti(buf[483:495]) - obj._sparse_structs = (structs, isextended, origsize) - - # Remove redundant slashes from directories. - if obj.isdir(): - obj.name = obj.name.rstrip("/") - - # Reconstruct a ustar longname. 
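The length prefix of a pax record counts the record as a whole, including its own digits, which is why _create_pax_generic_header() above iterates until the total stabilizes. The same fixed-point computation in isolation (the helper name pax_record is illustrative):

    def pax_record(keyword, value):
        # "%d %s=%s\n" where the leading number counts the entire record.
        l = len(keyword) + len(value) + 3      # the space, '=' and newline
        n = p = 0
        while True:
            n = l + len(str(p))
            if n == p:
                break
            p = n
        return str(p).encode("ascii") + b" " + keyword + b"=" + value + b"\n"

    rec = pax_record(b"path", b"some/long/name")
    print(rec)                                     # b'23 path=some/long/name\n'
    assert len(rec) == int(rec.split(b" ", 1)[0])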
- if prefix and obj.type not in GNU_TYPES: - obj.name = prefix + "/" + obj.name - return obj - - @classmethod - def fromtarfile(cls, tarfile): - """Return the next TarInfo object from TarFile object - tarfile. - """ - buf = tarfile.fileobj.read(BLOCKSIZE) - obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors) - obj.offset = tarfile.fileobj.tell() - BLOCKSIZE - return obj._proc_member(tarfile) - - #-------------------------------------------------------------------------- - # The following are methods that are called depending on the type of a - # member. The entry point is _proc_member() which can be overridden in a - # subclass to add custom _proc_*() methods. A _proc_*() method MUST - # implement the following - # operations: - # 1. Set self.offset_data to the position where the data blocks begin, - # if there is data that follows. - # 2. Set tarfile.offset to the position where the next member's header will - # begin. - # 3. Return self or another valid TarInfo object. - def _proc_member(self, tarfile): - """Choose the right processing method depending on - the type and call it. - """ - if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK): - return self._proc_gnulong(tarfile) - elif self.type == GNUTYPE_SPARSE: - return self._proc_sparse(tarfile) - elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE): - return self._proc_pax(tarfile) - else: - return self._proc_builtin(tarfile) - - def _proc_builtin(self, tarfile): - """Process a builtin type or an unknown type which - will be treated as a regular file. - """ - self.offset_data = tarfile.fileobj.tell() - offset = self.offset_data - if self.isreg() or self.type not in SUPPORTED_TYPES: - # Skip the following data blocks. - offset += self._block(self.size) - tarfile.offset = offset - - # Patch the TarInfo object with saved global - # header information. - self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors) - - return self - - def _proc_gnulong(self, tarfile): - """Process the blocks that hold a GNU longname - or longlink member. - """ - buf = tarfile.fileobj.read(self._block(self.size)) - - # Fetch the next header and process it. - try: - next = self.fromtarfile(tarfile) - except HeaderError: - raise SubsequentHeaderError("missing or bad subsequent header") - - # Patch the TarInfo object from the next header with - # the longname information. - next.offset = self.offset - if self.type == GNUTYPE_LONGNAME: - next.name = nts(buf, tarfile.encoding, tarfile.errors) - elif self.type == GNUTYPE_LONGLINK: - next.linkname = nts(buf, tarfile.encoding, tarfile.errors) - - return next - - def _proc_sparse(self, tarfile): - """Process a GNU sparse header plus extra headers. - """ - # We already collected some sparse structures in frombuf(). - structs, isextended, origsize = self._sparse_structs - del self._sparse_structs - - # Collect sparse structures from extended header blocks. - while isextended: - buf = tarfile.fileobj.read(BLOCKSIZE) - pos = 0 - for i in range(21): - try: - offset = nti(buf[pos:pos + 12]) - numbytes = nti(buf[pos + 12:pos + 24]) - except ValueError: - break - if offset and numbytes: - structs.append((offset, numbytes)) - pos += 24 - isextended = bool(buf[504]) - self.sparse = structs - - self.offset_data = tarfile.fileobj.tell() - tarfile.offset = self.offset_data + self._block(self.size) - self.size = origsize - return self - - def _proc_pax(self, tarfile): - """Process an extended or global header as described in - POSIX.1-2008. - """ - # Read the header information. 
- buf = tarfile.fileobj.read(self._block(self.size)) - - # A pax header stores supplemental information for either - # the following file (extended) or all following files - # (global). - if self.type == XGLTYPE: - pax_headers = tarfile.pax_headers - else: - pax_headers = tarfile.pax_headers.copy() - - # Check if the pax header contains a hdrcharset field. This tells us - # the encoding of the path, linkpath, uname and gname fields. Normally, - # these fields are UTF-8 encoded but since POSIX.1-2008 tar - # implementations are allowed to store them as raw binary strings if - # the translation to UTF-8 fails. - match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf) - if match is not None: - pax_headers["hdrcharset"] = match.group(1).decode("utf8") - - # For the time being, we don't care about anything other than "BINARY". - # The only other value that is currently allowed by the standard is - # "ISO-IR 10646 2000 UTF-8" in other words UTF-8. - hdrcharset = pax_headers.get("hdrcharset") - if hdrcharset == "BINARY": - encoding = tarfile.encoding - else: - encoding = "utf8" - - # Parse pax header information. A record looks like that: - # "%d %s=%s\n" % (length, keyword, value). length is the size - # of the complete record including the length field itself and - # the newline. keyword and value are both UTF-8 encoded strings. - regex = re.compile(br"(\d+) ([^=]+)=") - pos = 0 - while True: - match = regex.match(buf, pos) - if not match: - break - - length, keyword = match.groups() - length = int(length) - value = buf[match.end(2) + 1:match.start(1) + length - 1] - - # Normally, we could just use "utf8" as the encoding and "strict" - # as the error handler, but we better not take the risk. For - # example, GNU tar <= 1.23 is known to store filenames it cannot - # translate to UTF-8 as raw strings (unfortunately without a - # hdrcharset=BINARY header). - # We first try the strict standard encoding, and if that fails we - # fall back on the user's encoding and error handler. - keyword = self._decode_pax_field(keyword, "utf8", "utf8", - tarfile.errors) - if keyword in PAX_NAME_FIELDS: - value = self._decode_pax_field(value, encoding, tarfile.encoding, - tarfile.errors) - else: - value = self._decode_pax_field(value, "utf8", "utf8", - tarfile.errors) - - pax_headers[keyword] = value - pos += length - - # Fetch the next header. - try: - next = self.fromtarfile(tarfile) - except HeaderError: - raise SubsequentHeaderError("missing or bad subsequent header") - - # Process GNU sparse information. - if "GNU.sparse.map" in pax_headers: - # GNU extended sparse format version 0.1. - self._proc_gnusparse_01(next, pax_headers) - - elif "GNU.sparse.size" in pax_headers: - # GNU extended sparse format version 0.0. - self._proc_gnusparse_00(next, pax_headers, buf) - - elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0": - # GNU extended sparse format version 1.0. - self._proc_gnusparse_10(next, pax_headers, tarfile) - - if self.type in (XHDTYPE, SOLARIS_XHDTYPE): - # Patch the TarInfo object with the extended header info. - next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors) - next.offset = self.offset - - if "size" in pax_headers: - # If the extended header replaces the size field, - # we need to recalculate the offset where the next - # header starts. 
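The record grammar parsed above is "%d %s=%s\n", where the length field covers the whole record, so the value is sliced by offset rather than by scanning for the newline. A minimal reading loop over two well-formed records (the sample buffer is made up):

    import re

    buf = b"23 path=some/long/name\n12 uid=1000\n"
    regex = re.compile(br"(\d+) ([^=]+)=")

    pos, headers = 0, {}
    while True:
        match = regex.match(buf, pos)
        if not match:
            break
        length = int(match.group(1))
        # The value runs from just past '=' to just before the newline.
        value = buf[match.end(2) + 1:pos + length - 1]
        headers[match.group(2).decode("utf-8")] = value.decode("utf-8")
        pos += length

    print(headers)    # {'path': 'some/long/name', 'uid': '1000'}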
- offset = next.offset_data - if next.isreg() or next.type not in SUPPORTED_TYPES: - offset += next._block(next.size) - tarfile.offset = offset - - return next - - def _proc_gnusparse_00(self, next, pax_headers, buf): - """Process a GNU tar extended sparse header, version 0.0. - """ - offsets = [] - for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf): - offsets.append(int(match.group(1))) - numbytes = [] - for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf): - numbytes.append(int(match.group(1))) - next.sparse = list(zip(offsets, numbytes)) - - def _proc_gnusparse_01(self, next, pax_headers): - """Process a GNU tar extended sparse header, version 0.1. - """ - sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")] - next.sparse = list(zip(sparse[::2], sparse[1::2])) - - def _proc_gnusparse_10(self, next, pax_headers, tarfile): - """Process a GNU tar extended sparse header, version 1.0. - """ - fields = None - sparse = [] - buf = tarfile.fileobj.read(BLOCKSIZE) - fields, buf = buf.split(b"\n", 1) - fields = int(fields) - while len(sparse) < fields * 2: - if b"\n" not in buf: - buf += tarfile.fileobj.read(BLOCKSIZE) - number, buf = buf.split(b"\n", 1) - sparse.append(int(number)) - next.offset_data = tarfile.fileobj.tell() - next.sparse = list(zip(sparse[::2], sparse[1::2])) - - def _apply_pax_info(self, pax_headers, encoding, errors): - """Replace fields with supplemental information from a previous - pax extended or global header. - """ - for keyword, value in pax_headers.items(): - if keyword == "GNU.sparse.name": - setattr(self, "path", value) - elif keyword == "GNU.sparse.size": - setattr(self, "size", int(value)) - elif keyword == "GNU.sparse.realsize": - setattr(self, "size", int(value)) - elif keyword in PAX_FIELDS: - if keyword in PAX_NUMBER_FIELDS: - try: - value = PAX_NUMBER_FIELDS[keyword](value) - except ValueError: - value = 0 - if keyword == "path": - value = value.rstrip("/") - setattr(self, keyword, value) - - self.pax_headers = pax_headers.copy() - - def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors): - """Decode a single field from a pax record. - """ - try: - return value.decode(encoding, "strict") - except UnicodeDecodeError: - return value.decode(fallback_encoding, fallback_errors) - - def _block(self, count): - """Round up a byte count by BLOCKSIZE and return it, - e.g. _block(834) => 1024. - """ - blocks, remainder = divmod(count, BLOCKSIZE) - if remainder: - blocks += 1 - return blocks * BLOCKSIZE - - def isreg(self): - return self.type in REGULAR_TYPES - def isfile(self): - return self.isreg() - def isdir(self): - return self.type == DIRTYPE - def issym(self): - return self.type == SYMTYPE - def islnk(self): - return self.type == LNKTYPE - def ischr(self): - return self.type == CHRTYPE - def isblk(self): - return self.type == BLKTYPE - def isfifo(self): - return self.type == FIFOTYPE - def issparse(self): - return self.sparse is not None - def isdev(self): - return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE) -# class TarInfo - -class TarFile(object): - """The TarFile Class provides an interface to tar archives. - """ - - debug = 0 # May be set from 0 (no msgs) to 3 (all msgs) - - dereference = False # If true, add content of linked file to the - # tar file, else the link. - - ignore_zeros = False # If true, skips empty or invalid blocks and - # continues processing. - - errorlevel = 1 # If 0, fatal errors only appear in debug - # messages (if debug >= 0). 
If > 0, errors - # are passed to the caller as exceptions. - - format = DEFAULT_FORMAT # The format to use when creating an archive. - - encoding = ENCODING # Encoding for 8-bit character strings. - - errors = None # Error handler for unicode conversion. - - tarinfo = TarInfo # The default TarInfo class to use. - - fileobject = ExFileObject # The default ExFileObject class to use. - - def __init__(self, name=None, mode="r", fileobj=None, format=None, - tarinfo=None, dereference=None, ignore_zeros=None, encoding=None, - errors="surrogateescape", pax_headers=None, debug=None, errorlevel=None): - """Open an (uncompressed) tar archive `name'. `mode' is either 'r' to - read from an existing archive, 'a' to append data to an existing - file or 'w' to create a new file overwriting an existing one. `mode' - defaults to 'r'. - If `fileobj' is given, it is used for reading or writing data. If it - can be determined, `mode' is overridden by `fileobj's mode. - `fileobj' is not closed, when TarFile is closed. - """ - if len(mode) > 1 or mode not in "raw": - raise ValueError("mode must be 'r', 'a' or 'w'") - self.mode = mode - self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode] - - if not fileobj: - if self.mode == "a" and not os.path.exists(name): - # Create nonexistent files in append mode. - self.mode = "w" - self._mode = "wb" - fileobj = bltn_open(name, self._mode) - self._extfileobj = False - else: - if name is None and hasattr(fileobj, "name"): - name = fileobj.name - if hasattr(fileobj, "mode"): - self._mode = fileobj.mode - self._extfileobj = True - self.name = os.path.abspath(name) if name else None - self.fileobj = fileobj - - # Init attributes. - if format is not None: - self.format = format - if tarinfo is not None: - self.tarinfo = tarinfo - if dereference is not None: - self.dereference = dereference - if ignore_zeros is not None: - self.ignore_zeros = ignore_zeros - if encoding is not None: - self.encoding = encoding - self.errors = errors - - if pax_headers is not None and self.format == PAX_FORMAT: - self.pax_headers = pax_headers - else: - self.pax_headers = {} - - if debug is not None: - self.debug = debug - if errorlevel is not None: - self.errorlevel = errorlevel - - # Init datastructures. - self.closed = False - self.members = [] # list of members as TarInfo objects - self._loaded = False # flag if all members have been read - self.offset = self.fileobj.tell() - # current position in the archive file - self.inodes = {} # dictionary caching the inodes of - # archive members already added - - try: - if self.mode == "r": - self.firstmember = None - self.firstmember = self.next() - - if self.mode == "a": - # Move to the end of the archive, - # before the first empty block. - while True: - self.fileobj.seek(self.offset) - try: - tarinfo = self.tarinfo.fromtarfile(self) - self.members.append(tarinfo) - except EOFHeaderError: - self.fileobj.seek(self.offset) - break - except HeaderError as e: - raise ReadError(str(e)) - - if self.mode in "aw": - self._loaded = True - - if self.pax_headers: - buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy()) - self.fileobj.write(buf) - self.offset += len(buf) - except: - if not self._extfileobj: - self.fileobj.close() - self.closed = True - raise - - #-------------------------------------------------------------------------- - # Below are the classmethods which act as alternate constructors to the - # TarFile class. 
The open() method is the only one that is needed for - # public use; it is the "super"-constructor and is able to select an - # adequate "sub"-constructor for a particular compression using the mapping - # from OPEN_METH. - # - # This concept allows one to subclass TarFile without losing the comfort of - # the super-constructor. A sub-constructor is registered and made available - # by adding it to the mapping in OPEN_METH. - - @classmethod - def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs): - """Open a tar archive for reading, writing or appending. Return - an appropriate TarFile class. - - mode: - 'r' or 'r:*' open for reading with transparent compression - 'r:' open for reading exclusively uncompressed - 'r:gz' open for reading with gzip compression - 'r:bz2' open for reading with bzip2 compression - 'a' or 'a:' open for appending, creating the file if necessary - 'w' or 'w:' open for writing without compression - 'w:gz' open for writing with gzip compression - 'w:bz2' open for writing with bzip2 compression - - 'r|*' open a stream of tar blocks with transparent compression - 'r|' open an uncompressed stream of tar blocks for reading - 'r|gz' open a gzip compressed stream of tar blocks - 'r|bz2' open a bzip2 compressed stream of tar blocks - 'w|' open an uncompressed stream for writing - 'w|gz' open a gzip compressed stream for writing - 'w|bz2' open a bzip2 compressed stream for writing - """ - - if not name and not fileobj: - raise ValueError("nothing to open") - - if mode in ("r", "r:*"): - # Find out which *open() is appropriate for opening the file. - for comptype in cls.OPEN_METH: - func = getattr(cls, cls.OPEN_METH[comptype]) - if fileobj is not None: - saved_pos = fileobj.tell() - try: - return func(name, "r", fileobj, **kwargs) - except (ReadError, CompressionError) as e: - if fileobj is not None: - fileobj.seek(saved_pos) - continue - raise ReadError("file could not be opened successfully") - - elif ":" in mode: - filemode, comptype = mode.split(":", 1) - filemode = filemode or "r" - comptype = comptype or "tar" - - # Select the *open() function according to - # given compression. - if comptype in cls.OPEN_METH: - func = getattr(cls, cls.OPEN_METH[comptype]) - else: - raise CompressionError("unknown compression type %r" % comptype) - return func(name, filemode, fileobj, **kwargs) - - elif "|" in mode: - filemode, comptype = mode.split("|", 1) - filemode = filemode or "r" - comptype = comptype or "tar" - - if filemode not in "rw": - raise ValueError("mode must be 'r' or 'w'") - - stream = _Stream(name, filemode, comptype, fileobj, bufsize) - try: - t = cls(name, filemode, stream, **kwargs) - except: - stream.close() - raise - t._extfileobj = False - return t - - elif mode in "aw": - return cls.taropen(name, mode, fileobj, **kwargs) - - raise ValueError("undiscernible mode") - - @classmethod - def taropen(cls, name, mode="r", fileobj=None, **kwargs): - """Open uncompressed tar archive name for reading or writing. - """ - if len(mode) > 1 or mode not in "raw": - raise ValueError("mode must be 'r', 'a' or 'w'") - return cls(name, mode, fileobj, **kwargs) - - @classmethod - def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): - """Open gzip compressed tar archive name for reading or writing. - Appending is not allowed. 
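The '|' modes in the table above route through the _Stream machinery: blockwise sequential I/O with no seeking, so members have to be consumed in order. The stdlib tarfile accepts the same mode strings, which allows a quick demonstration (the member name and payload are made up):

    import io, tarfile

    buf = io.BytesIO()
    with tarfile.open(fileobj=buf, mode="w|gz") as tf:    # gzip-compressed stream
        info = tarfile.TarInfo("hello.txt")
        data = b"hi"
        info.size = len(data)
        tf.addfile(info, io.BytesIO(data))

    buf.seek(0)
    with tarfile.open(fileobj=buf, mode="r|*") as tf:     # transparent detection
        for member in tf:                                 # strictly sequential
            print(member.name, tf.extractfile(member).read())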
- """ - if len(mode) > 1 or mode not in "rw": - raise ValueError("mode must be 'r' or 'w'") - - try: - import gzip - gzip.GzipFile - except (ImportError, AttributeError): - raise CompressionError("gzip module is not available") - - extfileobj = fileobj is not None - try: - fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj) - t = cls.taropen(name, mode, fileobj, **kwargs) - except IOError: - if not extfileobj and fileobj is not None: - fileobj.close() - if fileobj is None: - raise - raise ReadError("not a gzip file") - except: - if not extfileobj and fileobj is not None: - fileobj.close() - raise - t._extfileobj = extfileobj - return t - - @classmethod - def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): - """Open bzip2 compressed tar archive name for reading or writing. - Appending is not allowed. - """ - if len(mode) > 1 or mode not in "rw": - raise ValueError("mode must be 'r' or 'w'.") - - try: - import bz2 - except ImportError: - raise CompressionError("bz2 module is not available") - - if fileobj is not None: - fileobj = _BZ2Proxy(fileobj, mode) - else: - fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel) - - try: - t = cls.taropen(name, mode, fileobj, **kwargs) - except (IOError, EOFError): - fileobj.close() - raise ReadError("not a bzip2 file") - t._extfileobj = False - return t - - # All *open() methods are registered here. - OPEN_METH = { - "tar": "taropen", # uncompressed tar - "gz": "gzopen", # gzip compressed tar - "bz2": "bz2open" # bzip2 compressed tar - } - - #-------------------------------------------------------------------------- - # The public methods which TarFile provides: - - def close(self): - """Close the TarFile. In write-mode, two finishing zero blocks are - appended to the archive. - """ - if self.closed: - return - - if self.mode in "aw": - self.fileobj.write(NUL * (BLOCKSIZE * 2)) - self.offset += (BLOCKSIZE * 2) - # fill up the end with zero-blocks - # (like option -b20 for tar does) - blocks, remainder = divmod(self.offset, RECORDSIZE) - if remainder > 0: - self.fileobj.write(NUL * (RECORDSIZE - remainder)) - - if not self._extfileobj: - self.fileobj.close() - self.closed = True - - def getmember(self, name): - """Return a TarInfo object for member `name'. If `name' can not be - found in the archive, KeyError is raised. If a member occurs more - than once in the archive, its last occurrence is assumed to be the - most up-to-date version. - """ - tarinfo = self._getmember(name) - if tarinfo is None: - raise KeyError("filename %r not found" % name) - return tarinfo - - def getmembers(self): - """Return the members of the archive as a list of TarInfo objects. The - list has the same order as the members in the archive. - """ - self._check() - if not self._loaded: # if we want to obtain a list of - self._load() # all members, we first have to - # scan the whole archive. - return self.members - - def getnames(self): - """Return the members of the archive as a list of their names. It has - the same order as the list returned by getmembers(). - """ - return [tarinfo.name for tarinfo in self.getmembers()] - - def gettarinfo(self, name=None, arcname=None, fileobj=None): - """Create a TarInfo object for either the file `name' or the file - object `fileobj' (using os.fstat on its file descriptor). You can - modify some of the TarInfo's attributes before you add it using - addfile(). If given, `arcname' specifies an alternative name for the - file in the archive. 
- """ - self._check("aw") - - # When fileobj is given, replace name by - # fileobj's real name. - if fileobj is not None: - name = fileobj.name - - # Building the name of the member in the archive. - # Backward slashes are converted to forward slashes, - # Absolute paths are turned to relative paths. - if arcname is None: - arcname = name - drv, arcname = os.path.splitdrive(arcname) - arcname = arcname.replace(os.sep, "/") - arcname = arcname.lstrip("/") - - # Now, fill the TarInfo object with - # information specific for the file. - tarinfo = self.tarinfo() - tarinfo.tarfile = self - - # Use os.stat or os.lstat, depending on platform - # and if symlinks shall be resolved. - if fileobj is None: - if hasattr(os, "lstat") and not self.dereference: - statres = os.lstat(name) - else: - statres = os.stat(name) - else: - statres = os.fstat(fileobj.fileno()) - linkname = "" - - stmd = statres.st_mode - if stat.S_ISREG(stmd): - inode = (statres.st_ino, statres.st_dev) - if not self.dereference and statres.st_nlink > 1 and \ - inode in self.inodes and arcname != self.inodes[inode]: - # Is it a hardlink to an already - # archived file? - type = LNKTYPE - linkname = self.inodes[inode] - else: - # The inode is added only if its valid. - # For win32 it is always 0. - type = REGTYPE - if inode[0]: - self.inodes[inode] = arcname - elif stat.S_ISDIR(stmd): - type = DIRTYPE - elif stat.S_ISFIFO(stmd): - type = FIFOTYPE - elif stat.S_ISLNK(stmd): - type = SYMTYPE - linkname = os.readlink(name) - elif stat.S_ISCHR(stmd): - type = CHRTYPE - elif stat.S_ISBLK(stmd): - type = BLKTYPE - else: - return None - - # Fill the TarInfo object with all - # information we can get. - tarinfo.name = arcname - tarinfo.mode = stmd - tarinfo.uid = statres.st_uid - tarinfo.gid = statres.st_gid - if type == REGTYPE: - tarinfo.size = statres.st_size - else: - tarinfo.size = 0 - tarinfo.mtime = statres.st_mtime - tarinfo.type = type - tarinfo.linkname = linkname - if pwd: - try: - tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0] - except KeyError: - pass - if grp: - try: - tarinfo.gname = grp.getgrgid(tarinfo.gid)[0] - except KeyError: - pass - - if type in (CHRTYPE, BLKTYPE): - if hasattr(os, "major") and hasattr(os, "minor"): - tarinfo.devmajor = os.major(statres.st_rdev) - tarinfo.devminor = os.minor(statres.st_rdev) - return tarinfo - - def list(self, verbose=True): - """Print a table of contents to sys.stdout. If `verbose' is False, only - the names of the members are printed. If it is True, an `ls -l'-like - output is produced. - """ - self._check() - - for tarinfo in self: - if verbose: - print(filemode(tarinfo.mode), end=' ') - print("%s/%s" % (tarinfo.uname or tarinfo.uid, - tarinfo.gname or tarinfo.gid), end=' ') - if tarinfo.ischr() or tarinfo.isblk(): - print("%10s" % ("%d,%d" \ - % (tarinfo.devmajor, tarinfo.devminor)), end=' ') - else: - print("%10d" % tarinfo.size, end=' ') - print("%d-%02d-%02d %02d:%02d:%02d" \ - % time.localtime(tarinfo.mtime)[:6], end=' ') - - print(tarinfo.name + ("/" if tarinfo.isdir() else ""), end=' ') - - if verbose: - if tarinfo.issym(): - print("->", tarinfo.linkname, end=' ') - if tarinfo.islnk(): - print("link to", tarinfo.linkname, end=' ') - print() - - def add(self, name, arcname=None, recursive=True, exclude=None, filter=None): - """Add the file `name' to the archive. `name' may be any type of file - (directory, fifo, symbolic link, etc.). If given, `arcname' - specifies an alternative name for the file in the archive. - Directories are added recursively by default. 
This can be avoided by - setting `recursive' to False. `exclude' is a function that should - return True for each filename to be excluded. `filter' is a function - that expects a TarInfo object argument and returns the changed - TarInfo object, if it returns None the TarInfo object will be - excluded from the archive. - """ - self._check("aw") - - if arcname is None: - arcname = name - - # Exclude pathnames. - if exclude is not None: - import warnings - warnings.warn("use the filter argument instead", - DeprecationWarning, 2) - if exclude(name): - self._dbg(2, "tarfile: Excluded %r" % name) - return - - # Skip if somebody tries to archive the archive... - if self.name is not None and os.path.abspath(name) == self.name: - self._dbg(2, "tarfile: Skipped %r" % name) - return - - self._dbg(1, name) - - # Create a TarInfo object from the file. - tarinfo = self.gettarinfo(name, arcname) - - if tarinfo is None: - self._dbg(1, "tarfile: Unsupported type %r" % name) - return - - # Change or exclude the TarInfo object. - if filter is not None: - tarinfo = filter(tarinfo) - if tarinfo is None: - self._dbg(2, "tarfile: Excluded %r" % name) - return - - # Append the tar header and data to the archive. - if tarinfo.isreg(): - f = bltn_open(name, "rb") - self.addfile(tarinfo, f) - f.close() - - elif tarinfo.isdir(): - self.addfile(tarinfo) - if recursive: - for f in os.listdir(name): - self.add(os.path.join(name, f), os.path.join(arcname, f), - recursive, exclude, filter=filter) - - else: - self.addfile(tarinfo) - - def addfile(self, tarinfo, fileobj=None): - """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is - given, tarinfo.size bytes are read from it and added to the archive. - You can create TarInfo objects using gettarinfo(). - On Windows platforms, `fileobj' should always be opened with mode - 'rb' to avoid irritation about the file size. - """ - self._check("aw") - - tarinfo = copy.copy(tarinfo) - - buf = tarinfo.tobuf(self.format, self.encoding, self.errors) - self.fileobj.write(buf) - self.offset += len(buf) - - # If there's data to follow, append it. - if fileobj is not None: - copyfileobj(fileobj, self.fileobj, tarinfo.size) - blocks, remainder = divmod(tarinfo.size, BLOCKSIZE) - if remainder > 0: - self.fileobj.write(NUL * (BLOCKSIZE - remainder)) - blocks += 1 - self.offset += blocks * BLOCKSIZE - - self.members.append(tarinfo) - - def extractall(self, path=".", members=None): - """Extract all members from the archive to the current working - directory and set owner, modification time and permissions on - directories afterwards. `path' specifies a different directory - to extract to. `members' is optional and must be a subset of the - list returned by getmembers(). - """ - directories = [] - - if members is None: - members = self - - for tarinfo in members: - if tarinfo.isdir(): - # Extract directories with a safe mode. - directories.append(tarinfo) - tarinfo = copy.copy(tarinfo) - tarinfo.mode = 0o700 - # Do not set_attrs directories, as we will do that further down - self.extract(tarinfo, path, set_attrs=not tarinfo.isdir()) - - # Reverse sort directories. - directories.sort(key=lambda a: a.name) - directories.reverse() - - # Set correct owner, mtime and filemode on directories. 
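        # The reverse-sorted order below restores the deepest directories
        # first: their owner, mtime and mode were deferred during extraction
        # so that files could still be created inside them, and fixing up
        # children before parents keeps a restored read-only parent from
        # blocking updates to the entries it contains.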
- for tarinfo in directories: - dirpath = os.path.join(path, tarinfo.name) - try: - self.chown(tarinfo, dirpath) - self.utime(tarinfo, dirpath) - self.chmod(tarinfo, dirpath) - except ExtractError as e: - if self.errorlevel > 1: - raise - else: - self._dbg(1, "tarfile: %s" % e) - - def extract(self, member, path="", set_attrs=True): - """Extract a member from the archive to the current working directory, - using its full name. Its file information is extracted as accurately - as possible. `member' may be a filename or a TarInfo object. You can - specify a different directory using `path'. File attributes (owner, - mtime, mode) are set unless `set_attrs' is False. - """ - self._check("r") - - if isinstance(member, str): - tarinfo = self.getmember(member) - else: - tarinfo = member - - # Prepare the link target for makelink(). - if tarinfo.islnk(): - tarinfo._link_target = os.path.join(path, tarinfo.linkname) - - try: - self._extract_member(tarinfo, os.path.join(path, tarinfo.name), - set_attrs=set_attrs) - except EnvironmentError as e: - if self.errorlevel > 0: - raise - else: - if e.filename is None: - self._dbg(1, "tarfile: %s" % e.strerror) - else: - self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename)) - except ExtractError as e: - if self.errorlevel > 1: - raise - else: - self._dbg(1, "tarfile: %s" % e) - - def extractfile(self, member): - """Extract a member from the archive as a file object. `member' may be - a filename or a TarInfo object. If `member' is a regular file, a - file-like object is returned. If `member' is a link, a file-like - object is constructed from the link's target. If `member' is none of - the above, None is returned. - The file-like object is read-only and provides the following - methods: read(), readline(), readlines(), seek() and tell() - """ - self._check("r") - - if isinstance(member, str): - tarinfo = self.getmember(member) - else: - tarinfo = member - - if tarinfo.isreg(): - return self.fileobject(self, tarinfo) - - elif tarinfo.type not in SUPPORTED_TYPES: - # If a member's type is unknown, it is treated as a - # regular file. - return self.fileobject(self, tarinfo) - - elif tarinfo.islnk() or tarinfo.issym(): - if isinstance(self.fileobj, _Stream): - # A small but ugly workaround for the case that someone tries - # to extract a (sym)link as a file-object from a non-seekable - # stream of tar blocks. - raise StreamError("cannot extract (sym)link as file object") - else: - # A (sym)link's file object is its target's file object. - return self.extractfile(self._find_link_target(tarinfo)) - else: - # If there's no data associated with the member (directory, chrdev, - # blkdev, etc.), return None instead of a file object. - return None - - def _extract_member(self, tarinfo, targetpath, set_attrs=True): - """Extract the TarInfo object tarinfo to a physical - file called targetpath. - """ - # Fetch the TarInfo object for the given name - # and build the destination pathname, replacing - # forward slashes to platform specific separators. - targetpath = targetpath.rstrip("/") - targetpath = targetpath.replace("/", os.sep) - - # Create all upper directories. - upperdirs = os.path.dirname(targetpath) - if upperdirs and not os.path.exists(upperdirs): - # Create directories that are not part of the archive with - # default permissions. 
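            # os.makedirs creates every missing intermediate directory in one
            # call; directories that also exist as archive members are handled
            # separately by makedir(), which tolerates the EEXIST this causes.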
- os.makedirs(upperdirs) - - if tarinfo.islnk() or tarinfo.issym(): - self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname)) - else: - self._dbg(1, tarinfo.name) - - if tarinfo.isreg(): - self.makefile(tarinfo, targetpath) - elif tarinfo.isdir(): - self.makedir(tarinfo, targetpath) - elif tarinfo.isfifo(): - self.makefifo(tarinfo, targetpath) - elif tarinfo.ischr() or tarinfo.isblk(): - self.makedev(tarinfo, targetpath) - elif tarinfo.islnk() or tarinfo.issym(): - self.makelink(tarinfo, targetpath) - elif tarinfo.type not in SUPPORTED_TYPES: - self.makeunknown(tarinfo, targetpath) - else: - self.makefile(tarinfo, targetpath) - - if set_attrs: - self.chown(tarinfo, targetpath) - if not tarinfo.issym(): - self.chmod(tarinfo, targetpath) - self.utime(tarinfo, targetpath) - - #-------------------------------------------------------------------------- - # Below are the different file methods. They are called via - # _extract_member() when extract() is called. They can be replaced in a - # subclass to implement other functionality. - - def makedir(self, tarinfo, targetpath): - """Make a directory called targetpath. - """ - try: - # Use a safe mode for the directory, the real mode is set - # later in _extract_member(). - os.mkdir(targetpath, 0o700) - except EnvironmentError as e: - if e.errno != errno.EEXIST: - raise - - def makefile(self, tarinfo, targetpath): - """Make a file called targetpath. - """ - source = self.fileobj - source.seek(tarinfo.offset_data) - target = bltn_open(targetpath, "wb") - if tarinfo.sparse is not None: - for offset, size in tarinfo.sparse: - target.seek(offset) - copyfileobj(source, target, size) - else: - copyfileobj(source, target, tarinfo.size) - target.seek(tarinfo.size) - target.truncate() - target.close() - - def makeunknown(self, tarinfo, targetpath): - """Make a file from a TarInfo object with an unknown type - at targetpath. - """ - self.makefile(tarinfo, targetpath) - self._dbg(1, "tarfile: Unknown file type %r, " \ - "extracted as regular file." % tarinfo.type) - - def makefifo(self, tarinfo, targetpath): - """Make a fifo called targetpath. - """ - if hasattr(os, "mkfifo"): - os.mkfifo(targetpath) - else: - raise ExtractError("fifo not supported by system") - - def makedev(self, tarinfo, targetpath): - """Make a character or block device called targetpath. - """ - if not hasattr(os, "mknod") or not hasattr(os, "makedev"): - raise ExtractError("special devices not supported by system") - - mode = tarinfo.mode - if tarinfo.isblk(): - mode |= stat.S_IFBLK - else: - mode |= stat.S_IFCHR - - os.mknod(targetpath, mode, - os.makedev(tarinfo.devmajor, tarinfo.devminor)) - - def makelink(self, tarinfo, targetpath): - """Make a (symbolic) link called targetpath. If it cannot be created - (platform limitation), we try to make a copy of the referenced file - instead of a link. - """ - try: - # For systems that support symbolic and hard links. - if tarinfo.issym(): - os.symlink(tarinfo.linkname, targetpath) - else: - # See extract(). 
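                # extract() stored the hard link's resolved location in
                # tarinfo._link_target; if that file already exists on disk a
                # hard link to it suffices, otherwise the archived target
                # member is extracted directly to targetpath.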
- if os.path.exists(tarinfo._link_target): - os.link(tarinfo._link_target, targetpath) - else: - self._extract_member(self._find_link_target(tarinfo), - targetpath) - except symlink_exception: - if tarinfo.issym(): - linkpath = os.path.join(os.path.dirname(tarinfo.name), - tarinfo.linkname) - else: - linkpath = tarinfo.linkname - else: - try: - self._extract_member(self._find_link_target(tarinfo), - targetpath) - except KeyError: - raise ExtractError("unable to resolve link inside archive") - - def chown(self, tarinfo, targetpath): - """Set owner of targetpath according to tarinfo. - """ - if pwd and hasattr(os, "geteuid") and os.geteuid() == 0: - # We have to be root to do so. - try: - g = grp.getgrnam(tarinfo.gname)[2] - except KeyError: - g = tarinfo.gid - try: - u = pwd.getpwnam(tarinfo.uname)[2] - except KeyError: - u = tarinfo.uid - try: - if tarinfo.issym() and hasattr(os, "lchown"): - os.lchown(targetpath, u, g) - else: - if sys.platform != "os2emx": - os.chown(targetpath, u, g) - except EnvironmentError as e: - raise ExtractError("could not change owner") - - def chmod(self, tarinfo, targetpath): - """Set file permissions of targetpath according to tarinfo. - """ - if hasattr(os, 'chmod'): - try: - os.chmod(targetpath, tarinfo.mode) - except EnvironmentError as e: - raise ExtractError("could not change mode") - - def utime(self, tarinfo, targetpath): - """Set modification time of targetpath according to tarinfo. - """ - if not hasattr(os, 'utime'): - return - try: - os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime)) - except EnvironmentError as e: - raise ExtractError("could not change modification time") - - #-------------------------------------------------------------------------- - def next(self): - """Return the next member of the archive as a TarInfo object, when - TarFile is opened for reading. Return None if there is no more - available. - """ - self._check("ra") - if self.firstmember is not None: - m = self.firstmember - self.firstmember = None - return m - - # Read the next block. - self.fileobj.seek(self.offset) - tarinfo = None - while True: - try: - tarinfo = self.tarinfo.fromtarfile(self) - except EOFHeaderError as e: - if self.ignore_zeros: - self._dbg(2, "0x%X: %s" % (self.offset, e)) - self.offset += BLOCKSIZE - continue - except InvalidHeaderError as e: - if self.ignore_zeros: - self._dbg(2, "0x%X: %s" % (self.offset, e)) - self.offset += BLOCKSIZE - continue - elif self.offset == 0: - raise ReadError(str(e)) - except EmptyHeaderError: - if self.offset == 0: - raise ReadError("empty file") - except TruncatedHeaderError as e: - if self.offset == 0: - raise ReadError(str(e)) - except SubsequentHeaderError as e: - raise ReadError(str(e)) - break - - if tarinfo is not None: - self.members.append(tarinfo) - else: - self._loaded = True - - return tarinfo - - #-------------------------------------------------------------------------- - # Little helper methods: - - def _getmember(self, name, tarinfo=None, normalize=False): - """Find an archive member by name from bottom to top. - If tarinfo is given, it is used as the starting point. - """ - # Ensure that all members have been loaded. - members = self.getmembers() - - # Limit the member search list up to tarinfo. 
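        # A hard link can only refer to a member archived before it, so when
        # a starting point is supplied the search is cut off at that member.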
- if tarinfo is not None: - members = members[:members.index(tarinfo)] - - if normalize: - name = os.path.normpath(name) - - for member in reversed(members): - if normalize: - member_name = os.path.normpath(member.name) - else: - member_name = member.name - - if name == member_name: - return member - - def _load(self): - """Read through the entire archive file and look for readable - members. - """ - while True: - tarinfo = self.next() - if tarinfo is None: - break - self._loaded = True - - def _check(self, mode=None): - """Check if TarFile is still open, and if the operation's mode - corresponds to TarFile's mode. - """ - if self.closed: - raise IOError("%s is closed" % self.__class__.__name__) - if mode is not None and self.mode not in mode: - raise IOError("bad operation for mode %r" % self.mode) - - def _find_link_target(self, tarinfo): - """Find the target member of a symlink or hardlink member in the - archive. - """ - if tarinfo.issym(): - # Always search the entire archive. - linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname - limit = None - else: - # Search the archive before the link, because a hard link is - # just a reference to an already archived file. - linkname = tarinfo.linkname - limit = tarinfo - - member = self._getmember(linkname, tarinfo=limit, normalize=True) - if member is None: - raise KeyError("linkname %r not found" % linkname) - return member - - def __iter__(self): - """Provide an iterator object. - """ - if self._loaded: - return iter(self.members) - else: - return TarIter(self) - - def _dbg(self, level, msg): - """Write debugging output to sys.stderr. - """ - if level <= self.debug: - print(msg, file=sys.stderr) - - def __enter__(self): - self._check() - return self - - def __exit__(self, type, value, traceback): - if type is None: - self.close() - else: - # An exception occurred. We must not call close() because - # it would try to write end-of-archive blocks and padding. - if not self._extfileobj: - self.fileobj.close() - self.closed = True -# class TarFile - -class TarIter(object): - """Iterator Class. - - for tarinfo in TarFile(...): - suite... - """ - - def __init__(self, tarfile): - """Construct a TarIter object. - """ - self.tarfile = tarfile - self.index = 0 - def __iter__(self): - """Return iterator object. - """ - return self - - def __next__(self): - """Return the next item using TarFile's next() method. - When all members have been read, set TarFile as _loaded. - """ - # Fix for SF #1100429: Under rare circumstances it can - # happen that getmembers() is called during iteration, - # which will cause TarIter to stop prematurely. - if not self.tarfile._loaded: - tarinfo = self.tarfile.next() - if not tarinfo: - self.tarfile._loaded = True - raise StopIteration - else: - try: - tarinfo = self.tarfile.members[self.index] - except IndexError: - raise StopIteration - self.index += 1 - return tarinfo - - next = __next__ # for Python 2.x - -#-------------------- -# exported functions -#-------------------- -def is_tarfile(name): - """Return True if name points to a tar archive that we - are able to handle, else return False. 
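       A sketch of typical use (the archive name is an assumption):

           if is_tarfile("backup.tar.gz"):
               with TarFile.open("backup.tar.gz") as tar:
                   print(tar.getnames())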
- """ - try: - t = open(name) - t.close() - return True - except TarError: - return False - -bltn_open = open -open = TarFile.open diff --git a/env/Lib/site-packages/pip/_vendor/distlib/compat.py b/env/Lib/site-packages/pip/_vendor/distlib/compat.py index c316fd973adf9352c1b3a54f8744387bebe70cee..1fe3d225acb9bf37acffafc2198dc96c7c7fd313 100644 --- a/env/Lib/site-packages/pip/_vendor/distlib/compat.py +++ b/env/Lib/site-packages/pip/_vendor/distlib/compat.py @@ -22,7 +22,6 @@ if sys.version_info[0] < 3: # pragma: no cover from types import FileType as file_type import __builtin__ as builtins import ConfigParser as configparser - from ._backport import shutil from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit from urllib import (urlretrieve, quote as _quote, unquote, url2pathname, pathname2url, ContentTooShortError, splittype) @@ -48,17 +47,18 @@ if sys.version_info[0] < 3: # pragma: no cover from itertools import ifilter as filter from itertools import ifilterfalse as filterfalse - _userprog = None - def splituser(host): - """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'.""" - global _userprog - if _userprog is None: - import re - _userprog = re.compile('^(.*)@(.*)$') + # Leaving this around for now, in case it needs resurrecting in some way + # _userprog = None + # def splituser(host): + # """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'.""" + # global _userprog + # if _userprog is None: + # import re + # _userprog = re.compile('^(.*)@(.*)$') - match = _userprog.match(host) - if match: return match.group(1, 2) - return None, host + # match = _userprog.match(host) + # if match: return match.group(1, 2) + # return None, host else: # pragma: no cover from io import StringIO @@ -68,7 +68,7 @@ else: # pragma: no cover import builtins import configparser import shutil - from urllib.parse import (urlparse, urlunparse, urljoin, splituser, quote, + from urllib.parse import (urlparse, urlunparse, urljoin, quote, unquote, urlsplit, urlunsplit, splittype) from urllib.request import (urlopen, urlretrieve, Request, url2pathname, pathname2url, @@ -88,6 +88,7 @@ else: # pragma: no cover from itertools import filterfalse filter = filter + try: from ssl import match_hostname, CertificateError except ImportError: # pragma: no cover @@ -311,10 +312,8 @@ except ImportError: # pragma: no cover return 'IronPython' return 'CPython' -try: - import sysconfig -except ImportError: # pragma: no cover - from ._backport import sysconfig +import shutil +import sysconfig try: callable = callable @@ -616,18 +615,15 @@ except ImportError: # pragma: no cover try: from importlib.util import cache_from_source # Python >= 3.4 except ImportError: # pragma: no cover - try: - from imp import cache_from_source - except ImportError: # pragma: no cover - def cache_from_source(path, debug_override=None): - assert path.endswith('.py') - if debug_override is None: - debug_override = __debug__ - if debug_override: - suffix = 'c' - else: - suffix = 'o' - return path + suffix + def cache_from_source(path, debug_override=None): + assert path.endswith('.py') + if debug_override is None: + debug_override = __debug__ + if debug_override: + suffix = 'c' + else: + suffix = 'o' + return path + suffix try: from collections import OrderedDict diff --git a/env/Lib/site-packages/pip/_vendor/distlib/database.py b/env/Lib/site-packages/pip/_vendor/distlib/database.py index 0a90c300ba83967a50ca0fe7dfbcd5c1406914eb..5db5d7f507c1d150e6b36f236df7ee61c0f65581 100644 --- 
a/env/Lib/site-packages/pip/_vendor/distlib/database.py +++ b/env/Lib/site-packages/pip/_vendor/distlib/database.py @@ -132,29 +132,35 @@ class DistributionPath(object): r = finder.find(entry) if not r or r.path in seen: continue - if self._include_dist and entry.endswith(DISTINFO_EXT): - possible_filenames = [METADATA_FILENAME, - WHEEL_METADATA_FILENAME, - LEGACY_METADATA_FILENAME] - for metadata_filename in possible_filenames: - metadata_path = posixpath.join(entry, metadata_filename) - pydist = finder.find(metadata_path) - if pydist: - break - else: - continue + try: + if self._include_dist and entry.endswith(DISTINFO_EXT): + possible_filenames = [METADATA_FILENAME, + WHEEL_METADATA_FILENAME, + LEGACY_METADATA_FILENAME] + for metadata_filename in possible_filenames: + metadata_path = posixpath.join(entry, metadata_filename) + pydist = finder.find(metadata_path) + if pydist: + break + else: + continue - with contextlib.closing(pydist.as_stream()) as stream: - metadata = Metadata(fileobj=stream, scheme='legacy') - logger.debug('Found %s', r.path) - seen.add(r.path) - yield new_dist_class(r.path, metadata=metadata, - env=self) - elif self._include_egg and entry.endswith(('.egg-info', - '.egg')): - logger.debug('Found %s', r.path) - seen.add(r.path) - yield old_dist_class(r.path, self) + with contextlib.closing(pydist.as_stream()) as stream: + metadata = Metadata(fileobj=stream, scheme='legacy') + logger.debug('Found %s', r.path) + seen.add(r.path) + yield new_dist_class(r.path, metadata=metadata, + env=self) + elif self._include_egg and entry.endswith(('.egg-info', + '.egg')): + logger.debug('Found %s', r.path) + seen.add(r.path) + yield old_dist_class(r.path, self) + except Exception as e: + msg = 'Unable to read distribution at %s, perhaps due to bad metadata: %s' + logger.warning(msg, r.path, e) + import warnings + warnings.warn(msg % (r.path, e), stacklevel=2) def _generate_cache(self): """ @@ -379,8 +385,9 @@ class Distribution(object): def _get_requirements(self, req_attr): md = self.metadata - logger.debug('Getting requirements from metadata %r', md.todict()) reqts = getattr(md, req_attr) + logger.debug('%s: got requirements %r from metadata: %r', self.name, req_attr, + reqts) return set(md.get_requirements(reqts, extras=self.extras, env=self.context)) @@ -1308,22 +1315,26 @@ def get_required_dists(dists, dist): :param dists: a list of distributions :param dist: a distribution, member of *dists* for which we are interested + in finding the dependencies. 
""" if dist not in dists: raise DistlibException('given distribution %r is not a member ' 'of the list' % dist.name) graph = make_graph(dists) - req = [] # required distributions + req = set() # required distributions todo = graph.adjacency_list[dist] # list of nodes we should inspect + seen = set(t[0] for t in todo) # already added to todo while todo: d = todo.pop()[0] - req.append(d) - for pred in graph.adjacency_list[d]: - if pred not in req: + req.add(d) + pred_list = graph.adjacency_list[d] + for pred in pred_list: + d = pred[0] + if d not in req and d not in seen: + seen.add(d) todo.append(pred) - return req diff --git a/env/Lib/site-packages/pip/_vendor/distlib/index.py b/env/Lib/site-packages/pip/_vendor/distlib/index.py index 7a87cdcf7a19987ad0344b8c5d161027b345e212..9b6d129ed690361770738bec73f44ba7e10a21c5 100644 --- a/env/Lib/site-packages/pip/_vendor/distlib/index.py +++ b/env/Lib/site-packages/pip/_vendor/distlib/index.py @@ -12,13 +12,13 @@ import subprocess import tempfile try: from threading import Thread -except ImportError: +except ImportError: # pragma: no cover from dummy_threading import Thread from . import DistlibException from .compat import (HTTPBasicAuthHandler, Request, HTTPPasswordMgr, urlparse, build_opener, string_types) -from .util import cached_property, zip_dir, ServerProxy +from .util import zip_dir, ServerProxy logger = logging.getLogger(__name__) @@ -67,21 +67,17 @@ class PackageIndex(object): Get the distutils command for interacting with PyPI configurations. :return: the command. """ - from distutils.core import Distribution - from distutils.config import PyPIRCCommand - d = Distribution() - return PyPIRCCommand(d) + from .util import _get_pypirc_command as cmd + return cmd() def read_configuration(self): """ - Read the PyPI access configuration as supported by distutils, getting - PyPI to do the actual work. This populates ``username``, ``password``, - ``realm`` and ``url`` attributes from the configuration. + Read the PyPI access configuration as supported by distutils. This populates + ``username``, ``password``, ``realm`` and ``url`` attributes from the + configuration. """ - # get distutils to do the work - c = self._get_pypirc_command() - c.repository = self.url - cfg = c._read_pypirc() + from .util import _load_pypirc + cfg = _load_pypirc(self) self.username = cfg.get('username') self.password = cfg.get('password') self.realm = cfg.get('realm', 'pypi') @@ -91,13 +87,10 @@ class PackageIndex(object): """ Save the PyPI access configuration. You must have set ``username`` and ``password`` attributes before calling this method. - - Again, distutils is used to do the actual work. """ self.check_credentials() - # get distutils to do the work - c = self._get_pypirc_command() - c._store_pypirc(self.username, self.password) + from .util import _store_pypirc + _store_pypirc(self) def check_credentials(self): """ @@ -111,7 +104,7 @@ class PackageIndex(object): pm.add_password(self.realm, netloc, self.username, self.password) self.password_handler = HTTPBasicAuthHandler(pm) - def register(self, metadata): + def register(self, metadata): # pragma: no cover """ Register a distribution on PyPI, using the provided metadata. @@ -149,8 +142,7 @@ class PackageIndex(object): logger.debug('%s: %s' % (name, s)) stream.close() - def get_sign_command(self, filename, signer, sign_password, - keystore=None): + def get_sign_command(self, filename, signer, sign_password, keystore=None): # pragma: no cover """ Return a suitable command for signing a file. 
@@ -213,7 +205,7 @@ class PackageIndex(object): t2.join() return p.returncode, stdout, stderr - def sign_file(self, filename, signer, sign_password, keystore=None): + def sign_file(self, filename, signer, sign_password, keystore=None): # pragma: no cover """ Sign a file. @@ -293,7 +285,7 @@ class PackageIndex(object): request = self.encode_request(d.items(), files) return self.send_request(request) - def upload_documentation(self, metadata, doc_dir): + def upload_documentation(self, metadata, doc_dir): # pragma: no cover """ Upload documentation to the index. @@ -506,7 +498,7 @@ class PackageIndex(object): } return Request(self.url, body, headers) - def search(self, terms, operator=None): + def search(self, terms, operator=None): # pragma: no cover if isinstance(terms, string_types): terms = {'name': terms} rpc_proxy = ServerProxy(self.url, timeout=3.0) diff --git a/env/Lib/site-packages/pip/_vendor/distlib/locators.py b/env/Lib/site-packages/pip/_vendor/distlib/locators.py index 12a1d06351e50b52c8672974d4a9053b7641abaf..966ebc0e37d6104a8e0e1fefe9dc526f39409ce2 100644 --- a/env/Lib/site-packages/pip/_vendor/distlib/locators.py +++ b/env/Lib/site-packages/pip/_vendor/distlib/locators.py @@ -20,14 +20,14 @@ import zlib from . import DistlibException from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url, - queue, quote, unescape, string_types, build_opener, + queue, quote, unescape, build_opener, HTTPRedirectHandler as BaseRedirectHandler, text_type, Request, HTTPError, URLError) from .database import Distribution, DistributionPath, make_dist from .metadata import Metadata, MetadataInvalidError -from .util import (cached_property, parse_credentials, ensure_slash, - split_filename, get_project_data, parse_requirement, - parse_name_and_version, ServerProxy, normalize_name) +from .util import (cached_property, ensure_slash, split_filename, get_project_data, + parse_requirement, parse_name_and_version, ServerProxy, + normalize_name) from .version import get_scheme, UnsupportedVersionError from .wheel import Wheel, is_compatible @@ -378,13 +378,13 @@ class Locator(object): continue try: if not matcher.match(k): - logger.debug('%s did not match %r', matcher, k) + pass # logger.debug('%s did not match %r', matcher, k) else: if prereleases or not vcls(k).is_prerelease: slist.append(k) - else: - logger.debug('skipping pre-release ' - 'version %s of %s', k, matcher.name) + # else: + # logger.debug('skipping pre-release ' + # 'version %s of %s', k, matcher.name) except Exception: # pragma: no cover logger.warning('error matching %s with %r', matcher, k) pass # slist.append(k) @@ -593,7 +593,7 @@ class SimpleScrapingLocator(Locator): # These are used to deal with various Content-Encoding schemes. decoders = { 'deflate': zlib.decompress, - 'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(d)).read(), + 'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(), 'none': lambda b: b, } @@ -633,7 +633,7 @@ class SimpleScrapingLocator(Locator): self._threads = [] for i in range(self.num_workers): t = threading.Thread(target=self._fetch) - t.setDaemon(True) + t.daemon = True t.start() self._threads.append(t) @@ -1053,17 +1053,15 @@ class AggregatingLocator(Locator): # We use a legacy scheme simply because most of the dists on PyPI use legacy -# versions which don't conform to PEP 426 / PEP 440. +# versions which don't conform to PEP 440. 
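# AggregatingLocator consults each child locator in turn, so a custom stack
# can put other sources ahead of the scraper. A sketch, assuming a local
# directory of built distributions and pip's vendored import path:
#
#     from pip._vendor.distlib.locators import (AggregatingLocator,
#         DirectoryLocator, SimpleScrapingLocator)
#     locator = AggregatingLocator(
#         DirectoryLocator('/srv/wheels'),
#         SimpleScrapingLocator('https://pypi.org/simple/', timeout=3.0),
#         scheme='legacy')
#     dist = locator.locate('requests')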
default_locator = AggregatingLocator( - JSONLocator(), + # JSONLocator(), # don't use as PEP 426 is withdrawn SimpleScrapingLocator('https://pypi.org/simple/', timeout=3.0), scheme='legacy') locate = default_locator.locate -NAME_VERSION_RE = re.compile(r'(?P<name>[\w-]+)\s*' - r'\(\s*(==\s*)?(?P<ver>[^)]+)\)$') class DependencyFinder(object): """ diff --git a/env/Lib/site-packages/pip/_vendor/distlib/markers.py b/env/Lib/site-packages/pip/_vendor/distlib/markers.py index ee1f3e23655b53293cc2d5bf50e835ac2662cc7f..9dc68410337dcf4619ef66a49d87cea8233bc057 100644 --- a/env/Lib/site-packages/pip/_vendor/distlib/markers.py +++ b/env/Lib/site-packages/pip/_vendor/distlib/markers.py @@ -13,20 +13,29 @@ Parser for the environment markers micro-language defined in PEP 508. # as ~= and === which aren't in Python, necessitating a different approach. import os +import re import sys import platform -import re -from .compat import python_implementation, urlparse, string_types +from .compat import string_types from .util import in_venv, parse_marker +from .version import NormalizedVersion as NV __all__ = ['interpret'] +_VERSION_PATTERN = re.compile(r'((\d+(\.\d+)*\w*)|\'(\d+(\.\d+)*\w*)\'|\"(\d+(\.\d+)*\w*)\")') + def _is_literal(o): if not isinstance(o, string_types) or not o: return False return o[0] in '\'"' +def _get_versions(s): + result = [] + for m in _VERSION_PATTERN.finditer(s): + result.append(NV(m.groups()[0])) + return set(result) + class Evaluator(object): """ This class is used to evaluate marker expessions. @@ -71,9 +80,18 @@ class Evaluator(object): lhs = self.evaluate(elhs, context) rhs = self.evaluate(erhs, context) + if ((elhs == 'python_version' or erhs == 'python_version') and + op in ('<', '<=', '>', '>=', '===', '==', '!=', '~=')): + lhs = NV(lhs) + rhs = NV(rhs) + elif elhs == 'python_version' and op in ('in', 'not in'): + lhs = NV(lhs) + rhs = _get_versions(rhs) result = self.operations[op](lhs, rhs) return result +_DIGITS = re.compile(r'\d+\.\d+') + def default_context(): def format_full_version(info): version = '%s.%s.%s' % (info.major, info.minor, info.micro) @@ -89,6 +107,9 @@ def default_context(): implementation_version = '0' implementation_name = '' + ppv = platform.python_version() + m = _DIGITS.match(ppv) + pv = m.group(0) result = { 'implementation_name': implementation_name, 'implementation_version': implementation_version, @@ -99,8 +120,8 @@ def default_context(): 'platform_system': platform.system(), 'platform_version': platform.version(), 'platform_in_venv': str(in_venv()), - 'python_full_version': platform.python_version(), - 'python_version': platform.python_version()[:3], + 'python_full_version': ppv, + 'python_version': pv, 'sys_platform': sys.platform, } return result diff --git a/env/Lib/site-packages/pip/_vendor/distlib/metadata.py b/env/Lib/site-packages/pip/_vendor/distlib/metadata.py index 6d5e236090d21060594251dfa166a096383356c5..c329e1977fd1ed403bb65529296d5c803a6b289f 100644 --- a/env/Lib/site-packages/pip/_vendor/distlib/metadata.py +++ b/env/Lib/site-packages/pip/_vendor/distlib/metadata.py @@ -5,7 +5,7 @@ # """Implementation of the Metadata for Python packages PEPs. -Supports all metadata formats (1.0, 1.1, 1.2, 1.3/2.1 and withdrawn 2.0). +Supports all metadata formats (1.0, 1.1, 1.2, 1.3/2.1 and 2.2). """ from __future__ import unicode_literals @@ -94,17 +94,23 @@ _426_MARKERS = ('Private-Version', 'Provides-Extra', 'Obsoleted-By', # See issue #106: Sometimes 'Requires' and 'Provides' occur wrongly in # the metadata. 
Include them in the tuple literal below to allow them # (for now). +# Ditto for Obsoletes - see issue #140. _566_FIELDS = _426_FIELDS + ('Description-Content-Type', - 'Requires', 'Provides') + 'Requires', 'Provides', 'Obsoletes') _566_MARKERS = ('Description-Content-Type',) +_643_MARKERS = ('Dynamic', 'License-File') + +_643_FIELDS = _566_FIELDS + _643_MARKERS + _ALL_FIELDS = set() _ALL_FIELDS.update(_241_FIELDS) _ALL_FIELDS.update(_314_FIELDS) _ALL_FIELDS.update(_345_FIELDS) _ALL_FIELDS.update(_426_FIELDS) _ALL_FIELDS.update(_566_FIELDS) +_ALL_FIELDS.update(_643_FIELDS) EXTRA_RE = re.compile(r'''extra\s*==\s*("([^"]+)"|'([^']+)')''') @@ -117,9 +123,13 @@ def _version2fieldlist(version): elif version == '1.2': return _345_FIELDS elif version in ('1.3', '2.1'): - return _345_FIELDS + _566_FIELDS + # avoid adding field names if already there + return _345_FIELDS + tuple(f for f in _566_FIELDS if f not in _345_FIELDS) elif version == '2.0': - return _426_FIELDS + raise ValueError('Metadata 2.0 is withdrawn and not supported') + # return _426_FIELDS + elif version == '2.2': + return _643_FIELDS raise MetadataUnrecognizedVersionError(version) @@ -137,7 +147,7 @@ def _best_version(fields): continue keys.append(key) - possible_versions = ['1.0', '1.1', '1.2', '1.3', '2.0', '2.1'] + possible_versions = ['1.0', '1.1', '1.2', '1.3', '2.1', '2.2'] # 2.0 removed # first let's try to see if a field is not part of one of the version for key in keys: @@ -157,9 +167,12 @@ def _best_version(fields): if key != 'Description': # In 2.1, description allowed after headers possible_versions.remove('2.1') logger.debug('Removed 2.1 due to %s', key) - if key not in _426_FIELDS and '2.0' in possible_versions: - possible_versions.remove('2.0') - logger.debug('Removed 2.0 due to %s', key) + if key not in _643_FIELDS and '2.2' in possible_versions: + possible_versions.remove('2.2') + logger.debug('Removed 2.2 due to %s', key) + # if key not in _426_FIELDS and '2.0' in possible_versions: + # possible_versions.remove('2.0') + # logger.debug('Removed 2.0 due to %s', key) # possible_version contains qualified versions if len(possible_versions) == 1: @@ -172,16 +185,18 @@ def _best_version(fields): is_1_1 = '1.1' in possible_versions and _has_marker(keys, _314_MARKERS) is_1_2 = '1.2' in possible_versions and _has_marker(keys, _345_MARKERS) is_2_1 = '2.1' in possible_versions and _has_marker(keys, _566_MARKERS) - is_2_0 = '2.0' in possible_versions and _has_marker(keys, _426_MARKERS) - if int(is_1_1) + int(is_1_2) + int(is_2_1) + int(is_2_0) > 1: - raise MetadataConflictError('You used incompatible 1.1/1.2/2.0/2.1 fields') + # is_2_0 = '2.0' in possible_versions and _has_marker(keys, _426_MARKERS) + is_2_2 = '2.2' in possible_versions and _has_marker(keys, _643_MARKERS) + if int(is_1_1) + int(is_1_2) + int(is_2_1) + int(is_2_2) > 1: + raise MetadataConflictError('You used incompatible 1.1/1.2/2.1/2.2 fields') - # we have the choice, 1.0, or 1.2, or 2.0 + # we have the choice, 1.0, or 1.2, 2.1 or 2.2 # - 1.0 has a broken Summary field but works with all tools # - 1.1 is to avoid # - 1.2 fixes Summary but has little adoption - # - 2.0 adds more features and is very new - if not is_1_1 and not is_1_2 and not is_2_1 and not is_2_0: + # - 2.1 adds more features + # - 2.2 is the latest + if not is_1_1 and not is_1_2 and not is_2_1 and not is_2_2: # we couldn't find any specific marker if PKG_INFO_PREFERRED_VERSION in possible_versions: return PKG_INFO_PREFERRED_VERSION @@ -191,8 +206,10 @@ def _best_version(fields): return '1.2' 
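    # Past this point only the 2.x checks remain: a field set that carried a
    # 2.1 marker returns '2.1'; otherwise (e.g. the 2.2-only Dynamic and
    # License-File markers) control falls through to the final return of
    # '2.2' below.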
if is_2_1: return '2.1' + # if is_2_2: + # return '2.2' - return '2.0' + return '2.2' # This follows the rules about transforming keys as described in # https://www.python.org/dev/peps/pep-0566/#id17 @@ -208,7 +225,7 @@ _LISTFIELDS = ('Platform', 'Classifier', 'Obsoletes', 'Requires', 'Provides', 'Obsoletes-Dist', 'Provides-Dist', 'Requires-Dist', 'Requires-External', 'Project-URL', 'Supported-Platform', 'Setup-Requires-Dist', - 'Provides-Extra', 'Extension') + 'Provides-Extra', 'Extension', 'License-File') _LISTTUPLEFIELDS = ('Project-URL',) _ELEMENTSFIELD = ('Keywords',) @@ -600,7 +617,7 @@ LEGACY_METADATA_FILENAME = 'METADATA' class Metadata(object): """ - The metadata of a release. This implementation uses 2.0 (JSON) + The metadata of a release. This implementation uses 2.1 metadata where possible. If not possible, it wraps a LegacyMetadata instance which handles the key-value metadata format. """ @@ -609,6 +626,8 @@ class Metadata(object): NAME_MATCHER = re.compile('^[0-9A-Z]([0-9A-Z_.-]*[0-9A-Z])?$', re.I) + FIELDNAME_MATCHER = re.compile('^[A-Z]([0-9A-Z-]*[0-9A-Z])?$', re.I) + VERSION_MATCHER = PEP440_VERSION_RE SUMMARY_MATCHER = re.compile('.{1,2047}') @@ -636,6 +655,7 @@ class Metadata(object): 'name': (NAME_MATCHER, ('legacy',)), 'version': (VERSION_MATCHER, ('legacy',)), 'summary': (SUMMARY_MATCHER, ('legacy',)), + 'dynamic': (FIELDNAME_MATCHER, ('legacy',)), } __slots__ = ('_legacy', '_data', 'scheme') diff --git a/env/Lib/site-packages/pip/_vendor/distlib/resources.py b/env/Lib/site-packages/pip/_vendor/distlib/resources.py index 18840167a9e3ea1cf443b0803230e41481df41d2..fef52aa103ea369c96567b9af2a5a0ba14db5cb9 100644 --- a/env/Lib/site-packages/pip/_vendor/distlib/resources.py +++ b/env/Lib/site-packages/pip/_vendor/distlib/resources.py @@ -11,13 +11,12 @@ import io import logging import os import pkgutil -import shutil import sys import types import zipimport from . 
import DistlibException -from .util import cached_property, get_cache_base, path_to_cache_dir, Cache +from .util import cached_property, get_cache_base, Cache logger = logging.getLogger(__name__) @@ -283,6 +282,7 @@ class ZipResourceFinder(ResourceFinder): result = False return result + _finder_registry = { type(None): ResourceFinder, zipimport.zipimporter: ZipResourceFinder @@ -296,6 +296,8 @@ try: import _frozen_importlib as _fi _finder_registry[_fi.SourceFileLoader] = ResourceFinder _finder_registry[_fi.FileFinder] = ResourceFinder + # See issue #146 + _finder_registry[_fi.SourcelessFileLoader] = ResourceFinder del _fi except (ImportError, AttributeError): pass @@ -304,6 +306,7 @@ except (ImportError, AttributeError): def register_finder(loader, finder_maker): _finder_registry[type(loader)] = finder_maker + _finder_cache = {} diff --git a/env/Lib/site-packages/pip/_vendor/distlib/scripts.py b/env/Lib/site-packages/pip/_vendor/distlib/scripts.py index 03f8f21e0ff265fa753dbb5fdb6ec020141adb09..d2706242b8aac125a66450d5ce8dcd3395336182 100644 --- a/env/Lib/site-packages/pip/_vendor/distlib/scripts.py +++ b/env/Lib/site-packages/pip/_vendor/distlib/scripts.py @@ -10,11 +10,13 @@ import os import re import struct import sys +import time +from zipfile import ZipInfo from .compat import sysconfig, detect_encoding, ZipFile from .resources import finder from .util import (FileOperator, get_export_entry, convert_path, - get_executable, in_venv) + get_executable, get_platform, in_venv) logger = logging.getLogger(__name__) @@ -170,6 +172,11 @@ class ScriptMaker(object): sysconfig.get_config_var('BINDIR'), 'python%s%s' % (sysconfig.get_config_var('VERSION'), sysconfig.get_config_var('EXE'))) + if not os.path.isfile(executable): + # for Python builds from source on Windows, no Python executables with + # a version suffix are created, so we use python.exe + executable = os.path.join(sysconfig.get_config_var('BINDIR'), + 'python%s' % (sysconfig.get_config_var('EXE'))) if options: executable = self._get_alternate_executable(executable, options) @@ -244,7 +251,13 @@ class ScriptMaker(object): launcher = self._get_launcher('w') stream = BytesIO() with ZipFile(stream, 'w') as zf: - zf.writestr('__main__.py', script_bytes) + source_date_epoch = os.environ.get('SOURCE_DATE_EPOCH') + if source_date_epoch: + date_time = time.gmtime(int(source_date_epoch))[:6] + zinfo = ZipInfo(filename='__main__.py', date_time=date_time) + zf.writestr(zinfo, script_bytes) + else: + zf.writestr('__main__.py', script_bytes) zip_data = stream.getvalue() script_bytes = launcher + shebang + zip_data for name in names: @@ -282,6 +295,19 @@ class ScriptMaker(object): self._fileop.set_executable_mode([outname]) filenames.append(outname) + variant_separator = '-' + + def get_script_filenames(self, name): + result = set() + if '' in self.variants: + result.add(name) + if 'X' in self.variants: + result.add('%s%s' % (name, self.version_info[0])) + if 'X.Y' in self.variants: + result.add('%s%s%s.%s' % (name, self.variant_separator, + self.version_info[0], self.version_info[1])) + return result + def _make_script(self, entry, filenames, options=None): post_interp = b'' if options: @@ -291,15 +317,7 @@ class ScriptMaker(object): post_interp = args.encode('utf-8') shebang = self._get_shebang('utf-8', post_interp, options=options) script = self._get_script_text(entry).encode('utf-8') - name = entry.name - scriptnames = set() - if '' in self.variants: - scriptnames.add(name) - if 'X' in self.variants: - scriptnames.add('%s%s' % (name, 
self.version_info[0])) - if 'X.Y' in self.variants: - scriptnames.add('%s-%s.%s' % (name, self.version_info[0], - self.version_info[1])) + scriptnames = self.get_script_filenames(entry.name) if options and options.get('gui', False): ext = 'pyw' else: @@ -326,8 +344,7 @@ class ScriptMaker(object): else: first_line = f.readline() if not first_line: # pragma: no cover - logger.warning('%s: %s is an empty file (skipping)', - self.get_command_name(), script) + logger.warning('%s is an empty file (skipping)', script) return match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n')) @@ -375,7 +392,8 @@ class ScriptMaker(object): bits = '64' else: bits = '32' - name = '%s%s.exe' % (kind, bits) + platform_suffix = '-arm' if get_platform() == 'win-arm64' else '' + name = '%s%s%s.exe' % (kind, bits, platform_suffix) # Issue 31: don't hardcode an absolute package name, but # determine it relative to the current package distlib_package = __name__.rsplit('.', 1)[0] diff --git a/env/Lib/site-packages/pip/_vendor/distlib/t32.exe b/env/Lib/site-packages/pip/_vendor/distlib/t32.exe index 8932a18e4596952373a38c60b81b7116d4ef9ee8..52154f0be32cc2bdbf98af131d477900667d0abd 100644 Binary files a/env/Lib/site-packages/pip/_vendor/distlib/t32.exe and b/env/Lib/site-packages/pip/_vendor/distlib/t32.exe differ diff --git a/env/Lib/site-packages/pip/_vendor/distlib/t64.exe b/env/Lib/site-packages/pip/_vendor/distlib/t64.exe index 325b8057c08cf7113d4fd889991fa5638d443793..e8bebdba6d8f242244bf397ab067965d47c5093e 100644 Binary files a/env/Lib/site-packages/pip/_vendor/distlib/t64.exe and b/env/Lib/site-packages/pip/_vendor/distlib/t64.exe differ diff --git a/env/Lib/site-packages/pip/_vendor/distlib/util.py b/env/Lib/site-packages/pip/_vendor/distlib/util.py index 01324eae462f3384358a77e649b9c59a26cdfa9e..dd01849d997e5ae9dc9809295e29ceb871b14216 100644 --- a/env/Lib/site-packages/pip/_vendor/distlib/util.py +++ b/env/Lib/site-packages/pip/_vendor/distlib/util.py @@ -1,5 +1,5 @@ # -# Copyright (C) 2012-2017 The Python Software Foundation. +# Copyright (C) 2012-2021 The Python Software Foundation. # See LICENSE.txt and CONTRIBUTORS.txt. 
# import codecs @@ -215,6 +215,10 @@ def parse_requirement(req): if not ver_remaining or ver_remaining[0] != ',': break ver_remaining = ver_remaining[1:].lstrip() + # Some packages have a trailing comma which would break things + # See issue #148 + if not ver_remaining: + break m = COMPARE_OP.match(ver_remaining) if not m: raise SyntaxError('invalid constraint: %s' % ver_remaining) @@ -309,7 +313,9 @@ def get_executable(): # else: # result = sys.executable # return result - result = os.path.normcase(sys.executable) + # Avoid normcasing: see issue #143 + # result = os.path.normcase(sys.executable) + result = sys.executable if not isinstance(result, text_type): result = fsdecode(result) return result @@ -1426,29 +1432,19 @@ if ssl: self.sock = sock self._tunnel() - if not hasattr(ssl, 'SSLContext'): - # For 2.x - if self.ca_certs: - cert_reqs = ssl.CERT_REQUIRED - else: - cert_reqs = ssl.CERT_NONE - self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, - cert_reqs=cert_reqs, - ssl_version=ssl.PROTOCOL_SSLv23, - ca_certs=self.ca_certs) - else: # pragma: no cover - context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) - if hasattr(ssl, 'OP_NO_SSLv2'): - context.options |= ssl.OP_NO_SSLv2 - if self.cert_file: - context.load_cert_chain(self.cert_file, self.key_file) - kwargs = {} - if self.ca_certs: - context.verify_mode = ssl.CERT_REQUIRED - context.load_verify_locations(cafile=self.ca_certs) - if getattr(ssl, 'HAS_SNI', False): - kwargs['server_hostname'] = self.host - self.sock = context.wrap_socket(sock, **kwargs) + context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) + if hasattr(ssl, 'OP_NO_SSLv2'): + context.options |= ssl.OP_NO_SSLv2 + if self.cert_file: + context.load_cert_chain(self.cert_file, self.key_file) + kwargs = {} + if self.ca_certs: + context.verify_mode = ssl.CERT_REQUIRED + context.load_verify_locations(cafile=self.ca_certs) + if getattr(ssl, 'HAS_SNI', False): + kwargs['server_hostname'] = self.host + + self.sock = context.wrap_socket(sock, **kwargs) if self.ca_certs and self.check_domain: try: match_hostname(self.sock.getpeercert(), self.host) @@ -1507,25 +1503,6 @@ if ssl: # # XML-RPC with timeouts # - -_ver_info = sys.version_info[:2] - -if _ver_info == (2, 6): - class HTTP(httplib.HTTP): - def __init__(self, host='', port=None, **kwargs): - if port == 0: # 0 means use port 0, not the default port - port = None - self._setup(self._connection_class(host, port, **kwargs)) - - - if ssl: - class HTTPS(httplib.HTTPS): - def __init__(self, host='', port=None, **kwargs): - if port == 0: # 0 means use port 0, not the default port - port = None - self._setup(self._connection_class(host, port, **kwargs)) - - class Transport(xmlrpclib.Transport): def __init__(self, timeout, use_datetime=0): self.timeout = timeout @@ -1533,14 +1510,10 @@ class Transport(xmlrpclib.Transport): def make_connection(self, host): h, eh, x509 = self.get_host_info(host) - if _ver_info == (2, 6): - result = HTTP(h, timeout=self.timeout) - else: - if not self._connection or host != self._connection[0]: - self._extra_headers = eh - self._connection = host, httplib.HTTPConnection(h) - result = self._connection[1] - return result + if not self._connection or host != self._connection[0]: + self._extra_headers = eh + self._connection = host, httplib.HTTPConnection(h) + return self._connection[1] if ssl: class SafeTransport(xmlrpclib.SafeTransport): @@ -1553,15 +1526,11 @@ if ssl: if not kwargs: kwargs = {} kwargs['timeout'] = self.timeout - if _ver_info == (2, 6): - result = HTTPS(host, None, **kwargs) - else: - if 
not self._connection or host != self._connection[0]: - self._extra_headers = eh - self._connection = host, httplib.HTTPSConnection(h, None, - **kwargs) - result = self._connection[1] - return result + if not self._connection or host != self._connection[0]: + self._extra_headers = eh + self._connection = host, httplib.HTTPSConnection(h, None, + **kwargs) + return self._connection[1] class ServerProxy(xmlrpclib.ServerProxy): @@ -1570,7 +1539,8 @@ class ServerProxy(xmlrpclib.ServerProxy): # The above classes only come into play if a timeout # is specified if timeout is not None: - scheme, _ = splittype(uri) + # scheme = splittype(uri) # deprecated as of Python 3.8 + scheme = urlparse(uri)[0] use_datetime = kwargs.get('use_datetime', 0) if scheme == 'https': tcls = SafeTransport @@ -1759,3 +1729,204 @@ def normalize_name(name): """Normalize a python package name a la PEP 503""" # https://www.python.org/dev/peps/pep-0503/#normalized-names return re.sub('[-_.]+', '-', name).lower() + +# def _get_pypirc_command(): + # """ + # Get the distutils command for interacting with PyPI configurations. + # :return: the command. + # """ + # from distutils.core import Distribution + # from distutils.config import PyPIRCCommand + # d = Distribution() + # return PyPIRCCommand(d) + +class PyPIRCFile(object): + + DEFAULT_REPOSITORY = 'https://upload.pypi.org/legacy/' + DEFAULT_REALM = 'pypi' + + def __init__(self, fn=None, url=None): + if fn is None: + fn = os.path.join(os.path.expanduser('~'), '.pypirc') + self.filename = fn + self.url = url + + def read(self): + result = {} + + if os.path.exists(self.filename): + repository = self.url or self.DEFAULT_REPOSITORY + + config = configparser.RawConfigParser() + config.read(self.filename) + sections = config.sections() + if 'distutils' in sections: + # let's get the list of servers + index_servers = config.get('distutils', 'index-servers') + _servers = [server.strip() for server in + index_servers.split('\n') + if server.strip() != ''] + if _servers == []: + # nothing set, let's try to get the default pypi + if 'pypi' in sections: + _servers = ['pypi'] + else: + for server in _servers: + result = {'server': server} + result['username'] = config.get(server, 'username') + + # optional params + for key, default in (('repository', self.DEFAULT_REPOSITORY), + ('realm', self.DEFAULT_REALM), + ('password', None)): + if config.has_option(server, key): + result[key] = config.get(server, key) + else: + result[key] = default + + # work around people having "repository" for the "pypi" + # section of their config set to the HTTP (rather than + # HTTPS) URL + if (server == 'pypi' and + repository in (self.DEFAULT_REPOSITORY, 'pypi')): + result['repository'] = self.DEFAULT_REPOSITORY + elif (result['server'] != repository and + result['repository'] != repository): + result = {} + elif 'server-login' in sections: + # old format + server = 'server-login' + if config.has_option(server, 'repository'): + repository = config.get(server, 'repository') + else: + repository = self.DEFAULT_REPOSITORY + result = { + 'username': config.get(server, 'username'), + 'password': config.get(server, 'password'), + 'repository': repository, + 'server': server, + 'realm': self.DEFAULT_REALM + } + return result + + def update(self, username, password): + # import pdb; pdb.set_trace() + config = configparser.RawConfigParser() + fn = self.filename + config.read(fn) + if not config.has_section('pypi'): + config.add_section('pypi') + config.set('pypi', 'username', username) + config.set('pypi', 'password', 
password) + with open(fn, 'w') as f: + config.write(f) + +def _load_pypirc(index): + """ + Read the PyPI access configuration as supported by distutils. + """ + return PyPIRCFile(url=index.url).read() + +def _store_pypirc(index): + PyPIRCFile().update(index.username, index.password) + +# +# get_platform()/get_host_platform() copied from Python 3.10.a0 source, with some minor +# tweaks +# + +def get_host_platform(): + """Return a string that identifies the current platform. This is used mainly to + distinguish platform-specific build directories and platform-specific built + distributions. Typically includes the OS name and version and the + architecture (as supplied by 'os.uname()'), although the exact information + included depends on the OS; eg. on Linux, the kernel version isn't + particularly important. + + Examples of returned values: + linux-i586 + linux-alpha (?) + solaris-2.6-sun4u + + Windows will return one of: + win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc) + win32 (all others - specifically, sys.platform is returned) + + For other non-POSIX platforms, currently just returns 'sys.platform'. + + """ + if os.name == 'nt': + if 'amd64' in sys.version.lower(): + return 'win-amd64' + if '(arm)' in sys.version.lower(): + return 'win-arm32' + if '(arm64)' in sys.version.lower(): + return 'win-arm64' + return sys.platform + + # Set for cross builds explicitly + if "_PYTHON_HOST_PLATFORM" in os.environ: + return os.environ["_PYTHON_HOST_PLATFORM"] + + if os.name != 'posix' or not hasattr(os, 'uname'): + # XXX what about the architecture? NT is Intel or Alpha, + # Mac OS is M68k or PPC, etc. + return sys.platform + + # Try to distinguish various flavours of Unix + + (osname, host, release, version, machine) = os.uname() + + # Convert the OS name to lowercase, remove '/' characters, and translate + # spaces (for "Power Macintosh") + osname = osname.lower().replace('/', '') + machine = machine.replace(' ', '_').replace('/', '-') + + if osname[:5] == 'linux': + # At least on Linux/Intel, 'machine' is the processor -- + # i386, etc. + # XXX what about Alpha, SPARC, etc? + return "%s-%s" % (osname, machine) + + elif osname[:5] == 'sunos': + if release[0] >= '5': # SunOS 5 == Solaris 2 + osname = 'solaris' + release = '%d.%s' % (int(release[0]) - 3, release[2:]) + # We can't use 'platform.architecture()[0]' because a + # bootstrap problem. We use a dict to get an error + # if some suspicious happens. 
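# The two keys below are the only expected values of sys.maxsize, so an
# unexpected word size raises KeyError here instead of silently mislabelling
# the platform string.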
+ bitness = {2147483647:'32bit', 9223372036854775807:'64bit'} + machine += '.%s' % bitness[sys.maxsize] + # fall through to standard osname-release-machine representation + elif osname[:3] == 'aix': + from _aix_support import aix_platform + return aix_platform() + elif osname[:6] == 'cygwin': + osname = 'cygwin' + rel_re = re.compile (r'[\d.]+', re.ASCII) + m = rel_re.match(release) + if m: + release = m.group() + elif osname[:6] == 'darwin': + import _osx_support, distutils.sysconfig + osname, release, machine = _osx_support.get_platform_osx( + distutils.sysconfig.get_config_vars(), + osname, release, machine) + + return '%s-%s-%s' % (osname, release, machine) + + +_TARGET_TO_PLAT = { + 'x86' : 'win32', + 'x64' : 'win-amd64', + 'arm' : 'win-arm32', +} + + +def get_platform(): + if os.name != 'nt': + return get_host_platform() + cross_compilation_target = os.environ.get('VSCMD_ARG_TGT_ARCH') + if cross_compilation_target not in _TARGET_TO_PLAT: + return get_host_platform() + return _TARGET_TO_PLAT[cross_compilation_target] diff --git a/env/Lib/site-packages/pip/_vendor/distlib/version.py b/env/Lib/site-packages/pip/_vendor/distlib/version.py index 3eebe18ee849dcafe39246ce2f4cabbe8e5a2b33..c7c8bb6ff4f8ed84e466a66cac6b953b901626ea 100644 --- a/env/Lib/site-packages/pip/_vendor/distlib/version.py +++ b/env/Lib/site-packages/pip/_vendor/distlib/version.py @@ -194,7 +194,7 @@ def _pep_440_key(s): if not groups[0]: epoch = 0 else: - epoch = int(groups[0]) + epoch = int(groups[0][:-1]) pre = groups[4:6] post = groups[7:9] dev = groups[10:12] @@ -710,6 +710,9 @@ class VersionScheme(object): """ Used for processing some metadata fields """ + # See issue #140. Be tolerant of a single trailing comma. + if s.endswith(','): + s = s[:-1] return self.is_valid_matcher('dummy_name (%s)' % s) def suggest(self, s): diff --git a/env/Lib/site-packages/pip/_vendor/distlib/w32.exe b/env/Lib/site-packages/pip/_vendor/distlib/w32.exe index e6439e9e45897365d5ac6a85a46864c158a225fd..4ee2d3a31b59e8b50f433ecdf0be9e496e8cc3b8 100644 Binary files a/env/Lib/site-packages/pip/_vendor/distlib/w32.exe and b/env/Lib/site-packages/pip/_vendor/distlib/w32.exe differ diff --git a/env/Lib/site-packages/pip/_vendor/distlib/w64.exe b/env/Lib/site-packages/pip/_vendor/distlib/w64.exe index 46139dbf9400b7bc0b64e6756ce17b4eb5fd7436..5763076d2878093971a0ef9870e1cde7f556b18b 100644 Binary files a/env/Lib/site-packages/pip/_vendor/distlib/w64.exe and b/env/Lib/site-packages/pip/_vendor/distlib/w64.exe differ diff --git a/env/Lib/site-packages/pip/_vendor/distlib/wheel.py b/env/Lib/site-packages/pip/_vendor/distlib/wheel.py index 1e2c7a020c931bff9cdb3aa1647be0264d927e7d..028c2d99b57782ed3bb268ce522ede37c1704d98 100644 --- a/env/Lib/site-packages/pip/_vendor/distlib/wheel.py +++ b/env/Lib/site-packages/pip/_vendor/distlib/wheel.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright (C) 2013-2017 Vinay Sajip. +# Copyright (C) 2013-2020 Vinay Sajip. # Licensed to the Python Software Foundation under a contributor agreement. # See LICENSE.txt and CONTRIBUTORS.txt. 
# @@ -9,10 +9,8 @@ from __future__ import unicode_literals import base64 import codecs import datetime -import distutils.util from email import message_from_file import hashlib -import imp import json import logging import os @@ -29,7 +27,8 @@ from .database import InstalledDistribution from .metadata import (Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME, LEGACY_METADATA_FILENAME) from .util import (FileOperator, convert_path, CSVReader, CSVWriter, Cache, - cached_property, get_cache_base, read_exports, tempdir) + cached_property, get_cache_base, read_exports, tempdir, + get_platform) from .version import NormalizedVersion, UnsupportedVersionError logger = logging.getLogger(__name__) @@ -51,20 +50,28 @@ if not VER_SUFFIX: # pragma: no cover PYVER = 'py' + VER_SUFFIX IMPVER = IMP_PREFIX + VER_SUFFIX -ARCH = distutils.util.get_platform().replace('-', '_').replace('.', '_') +ARCH = get_platform().replace('-', '_').replace('.', '_') ABI = sysconfig.get_config_var('SOABI') if ABI and ABI.startswith('cpython-'): - ABI = ABI.replace('cpython-', 'cp') + ABI = ABI.replace('cpython-', 'cp').split('-')[0] else: def _derive_abi(): parts = ['cp', VER_SUFFIX] if sysconfig.get_config_var('Py_DEBUG'): parts.append('d') - if sysconfig.get_config_var('WITH_PYMALLOC'): - parts.append('m') - if sysconfig.get_config_var('Py_UNICODE_SIZE') == 4: - parts.append('u') + if IMP_PREFIX == 'cp': + vi = sys.version_info[:2] + if vi < (3, 8): + wpm = sysconfig.get_config_var('WITH_PYMALLOC') + if wpm is None: + wpm = True + if wpm: + parts.append('m') + if vi < (3, 3): + us = sysconfig.get_config_var('Py_UNICODE_SIZE') + if us == 4 or (us is None and sys.maxunicode == 0x10FFFF): + parts.append('u') return ''.join(parts) ABI = _derive_abi() del _derive_abi @@ -95,6 +102,29 @@ if os.sep == '/': else: to_posix = lambda o: o.replace(os.sep, '/') +if sys.version_info[0] < 3: + import imp +else: + imp = None + import importlib.machinery + import importlib.util + +def _get_suffixes(): + if imp: + return [s[0] for s in imp.get_suffixes()] + else: + return importlib.machinery.EXTENSION_SUFFIXES + +def _load_dynamic(name, path): + # https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly + if imp: + return imp.load_dynamic(name, path) + else: + spec = importlib.util.spec_from_file_location(name, path) + module = importlib.util.module_from_spec(spec) + sys.modules[name] = module + spec.loader.exec_module(module) + return module class Mounter(object): def __init__(self): @@ -124,7 +154,7 @@ class Mounter(object): else: if fullname not in self.libs: raise ImportError('unable to find extension for %s' % fullname) - result = imp.load_dynamic(fullname, self.libs[fullname]) + result = _load_dynamic(fullname, self.libs[fullname]) result.__loader__ = self parts = fullname.rsplit('.', 1) if len(parts) > 1: @@ -301,10 +331,9 @@ class Wheel(object): result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii') return hash_kind, result - def write_record(self, records, record_path, base): + def write_record(self, records, record_path, archive_record_path): records = list(records) # make a copy, as mutated - p = to_posix(os.path.relpath(record_path, base)) - records.append((p, '', '')) + records.append((archive_record_path, '', '')) with CSVWriter(record_path) as writer: for row in records: writer.writerow(row) @@ -321,8 +350,8 @@ class Wheel(object): records.append((ap, digest, size)) p = os.path.join(distinfo, 'RECORD') - self.write_record(records, p, libdir) ap = to_posix(os.path.join(info_dir, 
'RECORD')) + self.write_record(records, p, ap) archive_paths.append((ap, p)) def build_zip(self, pathname, archive_paths): @@ -576,6 +605,13 @@ class Wheel(object): if not is_script: with zf.open(arcname) as bf: fileop.copy_stream(bf, outfile) + # Issue #147: permission bits aren't preserved. Using + # zf.extract(zinfo, libdir) should have worked, but didn't, + # see https://www.thetopsites.net/article/53834422.shtml + # So ... manually preserve permission bits as given in zinfo + if os.name == 'posix': + # just set the normal permission bits + os.chmod(outfile, (zinfo.external_attr >> 16) & 0x1FF) outfiles.append(outfile) # Double check the digest of the written file if not dry_run and row[1]: @@ -938,6 +974,16 @@ class Wheel(object): shutil.copyfile(newpath, pathname) return modified +def _get_glibc_version(): + import platform + ver = platform.libc_ver() + result = [] + if ver[0] == 'glibc': + for s in ver[1].split('.'): + result.append(int(s) if s.isdigit() else 0) + result = tuple(result) + return result + def compatible_tags(): """ Return (pyver, abi, arch) tuples compatible with this Python. @@ -948,7 +994,7 @@ def compatible_tags(): versions.append(''.join([major, str(minor)])) abis = [] - for suffix, _, _ in imp.get_suffixes(): + for suffix in _get_suffixes(): if suffix.startswith('.abi'): abis.append(suffix.split('.', 2)[1]) abis.sort() @@ -985,6 +1031,23 @@ def compatible_tags(): for abi in abis: for arch in arches: result.append((''.join((IMP_PREFIX, versions[0])), abi, arch)) + # manylinux + if abi != 'none' and sys.platform.startswith('linux'): + arch = arch.replace('linux_', '') + parts = _get_glibc_version() + if len(parts) == 2: + if parts >= (2, 5): + result.append((''.join((IMP_PREFIX, versions[0])), abi, + 'manylinux1_%s' % arch)) + if parts >= (2, 12): + result.append((''.join((IMP_PREFIX, versions[0])), abi, + 'manylinux2010_%s' % arch)) + if parts >= (2, 17): + result.append((''.join((IMP_PREFIX, versions[0])), abi, + 'manylinux2014_%s' % arch)) + result.append((''.join((IMP_PREFIX, versions[0])), abi, + 'manylinux_%s_%s_%s' % (parts[0], parts[1], + arch))) # where no ABI / arch dependency, but IMP_PREFIX dependency for i, version in enumerate(versions): @@ -997,6 +1060,7 @@ def compatible_tags(): result.append((''.join(('py', version)), 'none', 'any')) if i == 0: result.append((''.join(('py', version[0])), 'none', 'any')) + return set(result) diff --git a/env/Lib/site-packages/pip/_vendor/distro.py b/env/Lib/site-packages/pip/_vendor/distro.py deleted file mode 100644 index 0611b62a3a80fc60b7c7cc4bae917245e0a82b7e..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/distro.py +++ /dev/null @@ -1,1230 +0,0 @@ -# Copyright 2015,2016,2017 Nir Cohen -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -The ``distro`` package (``distro`` stands for Linux Distribution) provides -information about the Linux distribution it runs on, such as a reliable -machine-readable distro ID, or version information. 
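Before the deleted distro module begins: the manylinux block added to compatible_tags() above keys everything off _get_glibc_version(). The same derivation as a standalone sketch (meaningful only on Linux; on non-glibc systems platform.libc_ver() reports a different library and no manylinux tags apply):

import platform

def glibc_version():
    # Same shape as _get_glibc_version() above: ('glibc', '2.31') -> (2, 31)
    libc, ver = platform.libc_ver()
    if libc != 'glibc':
        return ()
    return tuple(int(p) if p.isdigit() else 0 for p in ver.split('.'))

def manylinux_tags(arch='x86_64'):
    # Thresholds used by the patch: manylinux1 needs glibc >= 2.5,
    # manylinux2010 >= 2.12, manylinux2014 >= 2.17, plus the exact
    # PEP 600 style manylinux_X_Y tag for the version found.
    parts = glibc_version()
    tags = []
    if len(parts) == 2:
        if parts >= (2, 5):
            tags.append('manylinux1_%s' % arch)
        if parts >= (2, 12):
            tags.append('manylinux2010_%s' % arch)
        if parts >= (2, 17):
            tags.append('manylinux2014_%s' % arch)
        tags.append('manylinux_%s_%s_%s' % (parts[0], parts[1], arch))
    return tags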
- -It is the recommended replacement for Python's original -:py:func:`platform.linux_distribution` function, but it provides much more -functionality. An alternative implementation became necessary because Python -3.5 deprecated this function, and Python 3.8 will remove it altogether. -Its predecessor function :py:func:`platform.dist` was already -deprecated since Python 2.6 and will also be removed in Python 3.8. -Still, there are many cases in which access to OS distribution information -is needed. See `Python issue 1322 <https://bugs.python.org/issue1322>`_ for -more information. -""" - -import os -import re -import sys -import json -import shlex -import logging -import argparse -import subprocess - - -_UNIXCONFDIR = os.environ.get('UNIXCONFDIR', '/etc') -_OS_RELEASE_BASENAME = 'os-release' - -#: Translation table for normalizing the "ID" attribute defined in os-release -#: files, for use by the :func:`distro.id` method. -#: -#: * Key: Value as defined in the os-release file, translated to lower case, -#: with blanks translated to underscores. -#: -#: * Value: Normalized value. -NORMALIZED_OS_ID = { - 'ol': 'oracle', # Oracle Linux -} - -#: Translation table for normalizing the "Distributor ID" attribute returned by -#: the lsb_release command, for use by the :func:`distro.id` method. -#: -#: * Key: Value as returned by the lsb_release command, translated to lower -#: case, with blanks translated to underscores. -#: -#: * Value: Normalized value. -NORMALIZED_LSB_ID = { - 'enterpriseenterpriseas': 'oracle', # Oracle Enterprise Linux 4 - 'enterpriseenterpriseserver': 'oracle', # Oracle Linux 5 - 'redhatenterpriseworkstation': 'rhel', # RHEL 6, 7 Workstation - 'redhatenterpriseserver': 'rhel', # RHEL 6, 7 Server - 'redhatenterprisecomputenode': 'rhel', # RHEL 6 ComputeNode -} - -#: Translation table for normalizing the distro ID derived from the file name -#: of distro release files, for use by the :func:`distro.id` method. -#: -#: * Key: Value as derived from the file name of a distro release file, -#: translated to lower case, with blanks translated to underscores. -#: -#: * Value: Normalized value. -NORMALIZED_DISTRO_ID = { - 'redhat': 'rhel', # RHEL 6.x, 7.x -} - -# Pattern for content of distro release file (reversed) -_DISTRO_RELEASE_CONTENT_REVERSED_PATTERN = re.compile( - r'(?:[^)]*\)(.*)\()? *(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)') - -# Pattern for base file name of distro release file -_DISTRO_RELEASE_BASENAME_PATTERN = re.compile( - r'(\w+)[-_](release|version)$') - -# Base file names to be ignored when searching for distro release file -_DISTRO_RELEASE_IGNORE_BASENAMES = ( - 'debian_version', - 'lsb-release', - 'oem-release', - _OS_RELEASE_BASENAME, - 'system-release', - 'plesk-release', -) - - -def linux_distribution(full_distribution_name=True): - """ - Return information about the current OS distribution as a tuple - ``(id_name, version, codename)`` with items as follows: - - * ``id_name``: If *full_distribution_name* is false, the result of - :func:`distro.id`. Otherwise, the result of :func:`distro.name`. - - * ``version``: The result of :func:`distro.version`. - - * ``codename``: The result of :func:`distro.codename`. - - The interface of this function is compatible with the original - :py:func:`platform.linux_distribution` function, supporting a subset of - its parameters. 
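The three normalization tables above all feed one tiny transformation, implemented later in LinuxDistribution.id() and restated here as a sketch (it assumes the NORMALIZED_* tables defined above are in scope):

def normalize(distro_id, table):
    # Lower-case, turn blanks into underscores, then consult the
    # table; IDs with no entry pass through unchanged.
    distro_id = distro_id.lower().replace(' ', '_')
    return table.get(distro_id, distro_id)

assert normalize('Ol', NORMALIZED_OS_ID) == 'oracle'
assert normalize('EnterpriseEnterpriseServer', NORMALIZED_LSB_ID) == 'oracle'
assert normalize('SomethingNew', NORMALIZED_OS_ID) == 'somethingnew'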
- - The data it returns may not exactly be the same, because it uses more data - sources than the original function, and that may lead to different data if - the OS distribution is not consistent across multiple data sources it - provides (there are indeed such distributions ...). - - Another reason for differences is the fact that the :func:`distro.id` - method normalizes the distro ID string to a reliable machine-readable value - for a number of popular OS distributions. - """ - return _distro.linux_distribution(full_distribution_name) - - -def id(): - """ - Return the distro ID of the current distribution, as a - machine-readable string. - - For a number of OS distributions, the returned distro ID value is - *reliable*, in the sense that it is documented and that it does not change - across releases of the distribution. - - This package maintains the following reliable distro ID values: - - ============== ========================================= - Distro ID Distribution - ============== ========================================= - "ubuntu" Ubuntu - "debian" Debian - "rhel" RedHat Enterprise Linux - "centos" CentOS - "fedora" Fedora - "sles" SUSE Linux Enterprise Server - "opensuse" openSUSE - "amazon" Amazon Linux - "arch" Arch Linux - "cloudlinux" CloudLinux OS - "exherbo" Exherbo Linux - "gentoo" Gentoo Linux - "ibm_powerkvm" IBM PowerKVM - "kvmibm" KVM for IBM z Systems - "linuxmint" Linux Mint - "mageia" Mageia - "mandriva" Mandriva Linux - "parallels" Parallels - "pidora" Pidora - "raspbian" Raspbian - "oracle" Oracle Linux (and Oracle Enterprise Linux) - "scientific" Scientific Linux - "slackware" Slackware - "xenserver" XenServer - "openbsd" OpenBSD - "netbsd" NetBSD - "freebsd" FreeBSD - "midnightbsd" MidnightBSD - ============== ========================================= - - If you need reliable IDs added to this set for further distros, - or if you find that the :func:`distro.id` function returns a different - distro ID for one of the listed distros, please create an issue in the - `distro issue tracker`_. - - **Lookup hierarchy and transformations:** - - First, the ID is obtained from the following sources, in the specified - order. The first available and non-empty value is used: - - * the value of the "ID" attribute of the os-release file, - - * the value of the "Distributor ID" attribute returned by the lsb_release - command, - - * the first part of the file name of the distro release file. - - The ID value determined this way then passes through the following - transformations before it is returned by this method: - - * it is translated to lower case, - - * blanks (which should not be there anyway) are translated to underscores, - - * a normalization of the ID is performed, based upon - `normalization tables`_. The purpose of this normalization is to ensure - that the ID is as reliable as possible, even across incompatible changes - in the OS distributions. A common reason for an incompatible change is - the addition of an os-release file, or the addition of the lsb_release - command, with ID values that differ from what was previously determined - from the distro release file name. - """ - return _distro.id() - - -def name(pretty=False): - """ - Return the name of the current OS distribution, as a human-readable - string. - - If *pretty* is false, the name is returned without version or codename. - (e.g. "CentOS Linux") - - If *pretty* is true, the version and codename are appended. - (e.g.
"CentOS Linux 7.1.1503 (Core)") - - **Lookup hierarchy:** - - The name is obtained from the following sources, in the specified order. - The first available and non-empty value is used: - - * If *pretty* is false: - - - the value of the "NAME" attribute of the os-release file, - - - the value of the "Distributor ID" attribute returned by the lsb_release - command, - - - the value of the "<name>" field of the distro release file. - - * If *pretty* is true: - - - the value of the "PRETTY_NAME" attribute of the os-release file, - - - the value of the "Description" attribute returned by the lsb_release - command, - - - the value of the "<name>" field of the distro release file, appended - with the value of the pretty version ("<version_id>" and "<codename>" - fields) of the distro release file, if available. - """ - return _distro.name(pretty) - - -def version(pretty=False, best=False): - """ - Return the version of the current OS distribution, as a human-readable - string. - - If *pretty* is false, the version is returned without codename (e.g. - "7.0"). - - If *pretty* is true, the codename in parenthesis is appended, if the - codename is non-empty (e.g. "7.0 (Maipo)"). - - Some distributions provide version numbers with different precisions in - the different sources of distribution information. Examining the different - sources in a fixed priority order does not always yield the most precise - version (e.g. for Debian 8.2, or CentOS 7.1). - - The *best* parameter can be used to control the approach for the returned - version: - - If *best* is false, the first non-empty version number in priority order of - the examined sources is returned. - - If *best* is true, the most precise version number out of all examined - sources is returned. - - **Lookup hierarchy:** - - In all cases, the version number is obtained from the following sources. - If *best* is false, this order represents the priority order: - - * the value of the "VERSION_ID" attribute of the os-release file, - * the value of the "Release" attribute returned by the lsb_release - command, - * the version number parsed from the "<version_id>" field of the first line - of the distro release file, - * the version number parsed from the "PRETTY_NAME" attribute of the - os-release file, if it follows the format of the distro release files. - * the version number parsed from the "Description" attribute returned by - the lsb_release command, if it follows the format of the distro release - files. - """ - return _distro.version(pretty, best) - - -def version_parts(best=False): - """ - Return the version of the current OS distribution as a tuple - ``(major, minor, build_number)`` with items as follows: - - * ``major``: The result of :func:`distro.major_version`. - - * ``minor``: The result of :func:`distro.minor_version`. - - * ``build_number``: The result of :func:`distro.build_number`. - - For a description of the *best* parameter, see the :func:`distro.version` - method. - """ - return _distro.version_parts(best) - - -def major_version(best=False): - """ - Return the major version of the current OS distribution, as a string, - if provided. - Otherwise, the empty string is returned. The major version is the first - part of the dot-separated version string. - - For a description of the *best* parameter, see the :func:`distro.version` - method. - """ - return _distro.major_version(best) - - -def minor_version(best=False): - """ - Return the minor version of the current OS distribution, as a string, - if provided. 
- Otherwise, the empty string is returned. The minor version is the second - part of the dot-separated version string. - - For a description of the *best* parameter, see the :func:`distro.version` - method. - """ - return _distro.minor_version(best) - - -def build_number(best=False): - """ - Return the build number of the current OS distribution, as a string, - if provided. - Otherwise, the empty string is returned. The build number is the third part - of the dot-separated version string. - - For a description of the *best* parameter, see the :func:`distro.version` - method. - """ - return _distro.build_number(best) - - -def like(): - """ - Return a space-separated list of distro IDs of distributions that are - closely related to the current OS distribution in regards to packaging - and programming interfaces, for example distributions the current - distribution is a derivative from. - - **Lookup hierarchy:** - - This information item is only provided by the os-release file. - For details, see the description of the "ID_LIKE" attribute in the - `os-release man page - <http://www.freedesktop.org/software/systemd/man/os-release.html>`_. - """ - return _distro.like() - - -def codename(): - """ - Return the codename for the release of the current OS distribution, - as a string. - - If the distribution does not have a codename, an empty string is returned. - - Note that the returned codename is not always really a codename. For - example, openSUSE returns "x86_64". This function does not handle such - cases in any special way and just returns the string it finds, if any. - - **Lookup hierarchy:** - - * the codename within the "VERSION" attribute of the os-release file, if - provided, - - * the value of the "Codename" attribute returned by the lsb_release - command, - - * the value of the "<codename>" field of the distro release file. - """ - return _distro.codename() - - -def info(pretty=False, best=False): - """ - Return certain machine-readable information items about the current OS - distribution in a dictionary, as shown in the following example: - - .. sourcecode:: python - - { - 'id': 'rhel', - 'version': '7.0', - 'version_parts': { - 'major': '7', - 'minor': '0', - 'build_number': '' - }, - 'like': 'fedora', - 'codename': 'Maipo' - } - - The dictionary structure and keys are always the same, regardless of which - information items are available in the underlying data sources. The values - for the various keys are as follows: - - * ``id``: The result of :func:`distro.id`. - - * ``version``: The result of :func:`distro.version`. - - * ``version_parts -> major``: The result of :func:`distro.major_version`. - - * ``version_parts -> minor``: The result of :func:`distro.minor_version`. - - * ``version_parts -> build_number``: The result of - :func:`distro.build_number`. - - * ``like``: The result of :func:`distro.like`. - - * ``codename``: The result of :func:`distro.codename`. - - For a description of the *pretty* and *best* parameters, see the - :func:`distro.version` method. - """ - return _distro.info(pretty, best) - - -def os_release_info(): - """ - Return a dictionary containing key-value pairs for the information items - from the os-release file data source of the current OS distribution. - - See `os-release file`_ for details about these information items. - """ - return _distro.os_release_info() - - -def lsb_release_info(): - """ - Return a dictionary containing key-value pairs for the information items - from the lsb_release command data source of the current OS distribution. 
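major_version(), minor_version() and build_number() all slice one tuple produced by a single regex; the sketch below (regex copied from the version_parts() implementation later in the file) shows how it degrades when parts are missing:

import re

version_regex = re.compile(r'(\d+)\.?(\d+)?\.?(\d+)?')

m = version_regex.match('7.1.1503')
assert m.groups() == ('7', '1', '1503')

m = version_regex.match('8')                # minor and build absent
major, minor, build = m.groups()
assert (major, minor or '', build or '') == ('8', '', '')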
- - See `lsb_release command output`_ for details about these information - items. - """ - return _distro.lsb_release_info() - - -def distro_release_info(): - """ - Return a dictionary containing key-value pairs for the information items - from the distro release file data source of the current OS distribution. - - See `distro release file`_ for details about these information items. - """ - return _distro.distro_release_info() - - -def uname_info(): - """ - Return a dictionary containing key-value pairs for the information items - from the uname command data source of the current OS distribution. - """ - return _distro.uname_info() - - -def os_release_attr(attribute): - """ - Return a single named information item from the os-release file data source - of the current OS distribution. - - Parameters: - - * ``attribute`` (string): Key of the information item. - - Returns: - - * (string): Value of the information item, if the item exists. - The empty string, if the item does not exist. - - See `os-release file`_ for details about these information items. - """ - return _distro.os_release_attr(attribute) - - -def lsb_release_attr(attribute): - """ - Return a single named information item from the lsb_release command output - data source of the current OS distribution. - - Parameters: - - * ``attribute`` (string): Key of the information item. - - Returns: - - * (string): Value of the information item, if the item exists. - The empty string, if the item does not exist. - - See `lsb_release command output`_ for details about these information - items. - """ - return _distro.lsb_release_attr(attribute) - - -def distro_release_attr(attribute): - """ - Return a single named information item from the distro release file - data source of the current OS distribution. - - Parameters: - - * ``attribute`` (string): Key of the information item. - - Returns: - - * (string): Value of the information item, if the item exists. - The empty string, if the item does not exist. - - See `distro release file`_ for details about these information items. - """ - return _distro.distro_release_attr(attribute) - - -def uname_attr(attribute): - """ - Return a single named information item from the uname command output - data source of the current OS distribution. - - Parameters: - - * ``attribute`` (string): Key of the information item. - - Returns: - - * (string): Value of the information item, if the item exists. - The empty string, if the item does not exist. - """ - return _distro.uname_attr(attribute) - - -class cached_property(object): - """A version of @property which caches the value. On access, it calls the - underlying function and sets the value in `__dict__` so future accesses - will not re-call the property. - """ - def __init__(self, f): - self._fname = f.__name__ - self._f = f - - def __get__(self, obj, owner): - assert obj is not None, 'call {} on an instance'.format(self._fname) - ret = obj.__dict__[self._fname] = self._f(obj) - return ret - - -class LinuxDistribution(object): - """ - Provides information about an OS distribution. - - This package creates a private module-global instance of this class with - default initialization arguments, that is used by the - `consolidated accessor functions`_ and `single source accessor functions`_. - By using default initialization arguments, that module-global instance - returns data about the current OS distribution (i.e. the distro this - package runs on). - - Normally, it is not necessary to create additional instances of this class.
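The cached_property helper above works because it is a non-data descriptor: __get__ stores the computed value in the instance's __dict__, and since the descriptor defines no __set__, ordinary attribute lookup finds the cached value first on every later access. A minimal demonstration:

class Demo(object):
    @cached_property
    def value(self):
        print('computed once')
        return 42

d = Demo()
assert d.value == 42     # prints 'computed once', caches in d.__dict__
assert d.value == 42     # served straight from d.__dict__, no recompute
assert 'value' in d.__dict__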
- However, in situations where control is needed over the exact data sources - that are used, instances of this class can be created with a specific - distro release file, or a specific os-release file, or without invoking the - lsb_release command. - """ - - def __init__(self, - include_lsb=True, - os_release_file='', - distro_release_file='', - include_uname=True): - """ - The initialization method of this class gathers information from the - available data sources, and stores that in private instance attributes. - Subsequent access to the information items uses these private instance - attributes, so that the data sources are read only once. - - Parameters: - - * ``include_lsb`` (bool): Controls whether the - `lsb_release command output`_ is included as a data source. - - If the lsb_release command is not available in the program execution - path, the data source for the lsb_release command will be empty. - - * ``os_release_file`` (string): The path name of the - `os-release file`_ that is to be used as a data source. - - An empty string (the default) will cause the default path name to - be used (see `os-release file`_ for details). - - If the specified or defaulted os-release file does not exist, the - data source for the os-release file will be empty. - - * ``distro_release_file`` (string): The path name of the - `distro release file`_ that is to be used as a data source. - - An empty string (the default) will cause a default search algorithm - to be used (see `distro release file`_ for details). - - If the specified distro release file does not exist, or if no default - distro release file can be found, the data source for the distro - release file will be empty. - - * ``include_uname`` (bool): Controls whether uname command output is - included as a data source. If the uname command is not available in - the program execution path the data source for the uname command will - be empty. - - Public instance attributes: - - * ``os_release_file`` (string): The path name of the - `os-release file`_ that is actually used as a data source. The - empty string if no distro release file is used as a data source. - - * ``distro_release_file`` (string): The path name of the - `distro release file`_ that is actually used as a data source. The - empty string if no distro release file is used as a data source. - - * ``include_lsb`` (bool): The result of the ``include_lsb`` parameter. - This controls whether the lsb information will be loaded. - - * ``include_uname`` (bool): The result of the ``include_uname`` - parameter. This controls whether the uname information will - be loaded. - - Raises: - - * :py:exc:`IOError`: Some I/O issue with an os-release file or distro - release file. - - * :py:exc:`subprocess.CalledProcessError`: The lsb_release command had - some issue (other than not being available in the program execution - path). - - * :py:exc:`UnicodeError`: A data source has unexpected characters or - uses an unexpected encoding. 
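A hedged usage sketch for the parameters just listed: point the class at an explicit os-release file and skip the lsb_release subprocess entirely (the path shown is only an example):

dist = LinuxDistribution(include_lsb=False,
                         os_release_file='/etc/os-release')
print(dist.id(), dist.version(), dist.codename())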
- """ - self.os_release_file = os_release_file or \ - os.path.join(_UNIXCONFDIR, _OS_RELEASE_BASENAME) - self.distro_release_file = distro_release_file or '' # updated later - self.include_lsb = include_lsb - self.include_uname = include_uname - - def __repr__(self): - """Return repr of all info - """ - return \ - "LinuxDistribution(" \ - "os_release_file={self.os_release_file!r}, " \ - "distro_release_file={self.distro_release_file!r}, " \ - "include_lsb={self.include_lsb!r}, " \ - "include_uname={self.include_uname!r}, " \ - "_os_release_info={self._os_release_info!r}, " \ - "_lsb_release_info={self._lsb_release_info!r}, " \ - "_distro_release_info={self._distro_release_info!r}, " \ - "_uname_info={self._uname_info!r})".format( - self=self) - - def linux_distribution(self, full_distribution_name=True): - """ - Return information about the OS distribution that is compatible - with Python's :func:`platform.linux_distribution`, supporting a subset - of its parameters. - - For details, see :func:`distro.linux_distribution`. - """ - return ( - self.name() if full_distribution_name else self.id(), - self.version(), - self.codename() - ) - - def id(self): - """Return the distro ID of the OS distribution, as a string. - - For details, see :func:`distro.id`. - """ - def normalize(distro_id, table): - distro_id = distro_id.lower().replace(' ', '_') - return table.get(distro_id, distro_id) - - distro_id = self.os_release_attr('id') - if distro_id: - return normalize(distro_id, NORMALIZED_OS_ID) - - distro_id = self.lsb_release_attr('distributor_id') - if distro_id: - return normalize(distro_id, NORMALIZED_LSB_ID) - - distro_id = self.distro_release_attr('id') - if distro_id: - return normalize(distro_id, NORMALIZED_DISTRO_ID) - - distro_id = self.uname_attr('id') - if distro_id: - return normalize(distro_id, NORMALIZED_DISTRO_ID) - - return '' - - def name(self, pretty=False): - """ - Return the name of the OS distribution, as a string. - - For details, see :func:`distro.name`. - """ - name = self.os_release_attr('name') \ - or self.lsb_release_attr('distributor_id') \ - or self.distro_release_attr('name') \ - or self.uname_attr('name') - if pretty: - name = self.os_release_attr('pretty_name') \ - or self.lsb_release_attr('description') - if not name: - name = self.distro_release_attr('name') \ - or self.uname_attr('name') - version = self.version(pretty=True) - if version: - name = name + ' ' + version - return name or '' - - def version(self, pretty=False, best=False): - """ - Return the version of the OS distribution, as a string. - - For details, see :func:`distro.version`. - """ - versions = [ - self.os_release_attr('version_id'), - self.lsb_release_attr('release'), - self.distro_release_attr('version_id'), - self._parse_distro_release_content( - self.os_release_attr('pretty_name')).get('version_id', ''), - self._parse_distro_release_content( - self.lsb_release_attr('description')).get('version_id', ''), - self.uname_attr('release') - ] - version = '' - if best: - # This algorithm uses the last version in priority order that has - # the best precision. If the versions are not in conflict, that - # does not matter; otherwise, using the last one instead of the - # first one might be considered a surprise. 
- for v in versions: - if v.count(".") > version.count(".") or version == '': - version = v - else: - for v in versions: - if v != '': - version = v - break - if pretty and version and self.codename(): - version = '{0} ({1})'.format(version, self.codename()) - return version - - def version_parts(self, best=False): - """ - Return the version of the OS distribution, as a tuple of version - numbers. - - For details, see :func:`distro.version_parts`. - """ - version_str = self.version(best=best) - if version_str: - version_regex = re.compile(r'(\d+)\.?(\d+)?\.?(\d+)?') - matches = version_regex.match(version_str) - if matches: - major, minor, build_number = matches.groups() - return major, minor or '', build_number or '' - return '', '', '' - - def major_version(self, best=False): - """ - Return the major version number of the current distribution. - - For details, see :func:`distro.major_version`. - """ - return self.version_parts(best)[0] - - def minor_version(self, best=False): - """ - Return the minor version number of the current distribution. - - For details, see :func:`distro.minor_version`. - """ - return self.version_parts(best)[1] - - def build_number(self, best=False): - """ - Return the build number of the current distribution. - - For details, see :func:`distro.build_number`. - """ - return self.version_parts(best)[2] - - def like(self): - """ - Return the IDs of distributions that are like the OS distribution. - - For details, see :func:`distro.like`. - """ - return self.os_release_attr('id_like') or '' - - def codename(self): - """ - Return the codename of the OS distribution. - - For details, see :func:`distro.codename`. - """ - try: - # Handle os_release specially since distros might purposefully set - # this to empty string to have no codename - return self._os_release_info['codename'] - except KeyError: - return self.lsb_release_attr('codename') \ - or self.distro_release_attr('codename') \ - or '' - - def info(self, pretty=False, best=False): - """ - Return certain machine-readable information about the OS - distribution. - - For details, see :func:`distro.info`. - """ - return dict( - id=self.id(), - version=self.version(pretty, best), - version_parts=dict( - major=self.major_version(best), - minor=self.minor_version(best), - build_number=self.build_number(best) - ), - like=self.like(), - codename=self.codename(), - ) - - def os_release_info(self): - """ - Return a dictionary containing key-value pairs for the information - items from the os-release file data source of the OS distribution. - - For details, see :func:`distro.os_release_info`. - """ - return self._os_release_info - - def lsb_release_info(self): - """ - Return a dictionary containing key-value pairs for the information - items from the lsb_release command data source of the OS - distribution. - - For details, see :func:`distro.lsb_release_info`. - """ - return self._lsb_release_info - - def distro_release_info(self): - """ - Return a dictionary containing key-value pairs for the information - items from the distro release file data source of the OS - distribution. - - For details, see :func:`distro.distro_release_info`. - """ - return self._distro_release_info - - def uname_info(self): - """ - Return a dictionary containing key-value pairs for the information - items from the uname command data source of the OS distribution. - - For details, see :func:`distro.uname_info`. 
- """ - return self._uname_info - - def os_release_attr(self, attribute): - """ - Return a single named information item from the os-release file data - source of the OS distribution. - - For details, see :func:`distro.os_release_attr`. - """ - return self._os_release_info.get(attribute, '') - - def lsb_release_attr(self, attribute): - """ - Return a single named information item from the lsb_release command - output data source of the OS distribution. - - For details, see :func:`distro.lsb_release_attr`. - """ - return self._lsb_release_info.get(attribute, '') - - def distro_release_attr(self, attribute): - """ - Return a single named information item from the distro release file - data source of the OS distribution. - - For details, see :func:`distro.distro_release_attr`. - """ - return self._distro_release_info.get(attribute, '') - - def uname_attr(self, attribute): - """ - Return a single named information item from the uname command - output data source of the OS distribution. - - For details, see :func:`distro.uname_release_attr`. - """ - return self._uname_info.get(attribute, '') - - @cached_property - def _os_release_info(self): - """ - Get the information items from the specified os-release file. - - Returns: - A dictionary containing all information items. - """ - if os.path.isfile(self.os_release_file): - with open(self.os_release_file) as release_file: - return self._parse_os_release_content(release_file) - return {} - - @staticmethod - def _parse_os_release_content(lines): - """ - Parse the lines of an os-release file. - - Parameters: - - * lines: Iterable through the lines in the os-release file. - Each line must be a unicode string or a UTF-8 encoded byte - string. - - Returns: - A dictionary containing all information items. - """ - props = {} - lexer = shlex.shlex(lines, posix=True) - lexer.whitespace_split = True - - # The shlex module defines its `wordchars` variable using literals, - # making it dependent on the encoding of the Python source file. - # In Python 2.6 and 2.7, the shlex source file is encoded in - # 'iso-8859-1', and the `wordchars` variable is defined as a byte - # string. This causes a UnicodeDecodeError to be raised when the - # parsed content is a unicode object. The following fix resolves that - # (... but it should be fixed in shlex...): - if sys.version_info[0] == 2 and isinstance(lexer.wordchars, bytes): - lexer.wordchars = lexer.wordchars.decode('iso-8859-1') - - tokens = list(lexer) - for token in tokens: - # At this point, all shell-like parsing has been done (i.e. - # comments processed, quotes and backslash escape sequences - # processed, multi-line values assembled, trailing newlines - # stripped, etc.), so the tokens are now either: - # * variable assignments: var=value - # * commands or their arguments (not allowed in os-release) - if '=' in token: - k, v = token.split('=', 1) - props[k.lower()] = v - else: - # Ignore any tokens that are not variable assignments - pass - - if 'version_codename' in props: - # os-release added a version_codename field. Use that in - # preference to anything else Note that some distros purposefully - # do not have code names. 
They should be setting - # version_codename="" - props['codename'] = props['version_codename'] - elif 'ubuntu_codename' in props: - # Same as above but a non-standard field name used on older Ubuntus - props['codename'] = props['ubuntu_codename'] - elif 'version' in props: - # If there is no version_codename, parse it from the version - codename = re.search(r'(\(\D+\))|,(\s+)?\D+', props['version']) - if codename: - codename = codename.group() - codename = codename.strip('()') - codename = codename.strip(',') - codename = codename.strip() - # The codename usually appears within parentheses. - props['codename'] = codename - - return props - - @cached_property - def _lsb_release_info(self): - """ - Get the information items from the lsb_release command output. - - Returns: - A dictionary containing all information items. - """ - if not self.include_lsb: - return {} - with open(os.devnull, 'w') as devnull: - try: - cmd = ('lsb_release', '-a') - stdout = subprocess.check_output(cmd, stderr=devnull) - except OSError: # Command not found - return {} - content = self._to_str(stdout).splitlines() - return self._parse_lsb_release_content(content) - - @staticmethod - def _parse_lsb_release_content(lines): - """ - Parse the output of the lsb_release command. - - Parameters: - - * lines: Iterable through the lines of the lsb_release output. - Each line must be a unicode string or a UTF-8 encoded byte - string. - - Returns: - A dictionary containing all information items. - """ - props = {} - for line in lines: - kv = line.strip('\n').split(':', 1) - if len(kv) != 2: - # Ignore lines without a colon. - continue - k, v = kv - props.update({k.replace(' ', '_').lower(): v.strip()}) - return props - - @cached_property - def _uname_info(self): - with open(os.devnull, 'w') as devnull: - try: - cmd = ('uname', '-rs') - stdout = subprocess.check_output(cmd, stderr=devnull) - except OSError: - return {} - content = self._to_str(stdout).splitlines() - return self._parse_uname_content(content) - - @staticmethod - def _parse_uname_content(lines): - props = {} - match = re.search(r'^([^\s]+)\s+([\d\.]+)', lines[0].strip()) - if match: - name, version = match.groups() - - # This is to prevent the Linux kernel version from - # appearing as the 'best' version on otherwise - # identifiable distributions. - if name == 'Linux': - return {} - props['id'] = name.lower() - props['name'] = name - props['release'] = version - return props - - @staticmethod - def _to_str(text): - encoding = sys.getfilesystemencoding() - encoding = 'utf-8' if encoding == 'ascii' else encoding - - if sys.version_info[0] >= 3: - if isinstance(text, bytes): - return text.decode(encoding) - else: - if isinstance(text, unicode): # noqa - return text.encode(encoding) - - return text - - @cached_property - def _distro_release_info(self): - """ - Get the information items from the specified distro release file. - - Returns: - A dictionary containing all information items. - """ - if self.distro_release_file: - # If it was specified, we use it and parse what we can, even if - # its file name or content does not match the expected pattern. - distro_info = self._parse_distro_release_file( - self.distro_release_file) - basename = os.path.basename(self.distro_release_file) - # The file name pattern for user-specified distro release files - # is somewhat more tolerant (compared to when searching for the - # file), because we want to use what was specified as best as - # possible.
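That tolerant matching relies on _DISTRO_RELEASE_BASENAME_PATTERN, defined near the top of the file. Note that 'lsb-release' and 'os-release' match it too, which is exactly why they appear in _DISTRO_RELEASE_IGNORE_BASENAMES; a quick check against a copy of the pattern:

import re

pattern = re.compile(r'(\w+)[-_](release|version)$')   # same as the module's
for fn in ('centos-release', 'slackware-version', 'lsb-release', 'os-release'):
    m = pattern.match(fn)
    print(fn, '->', m.group(1) if m else None)
# centos-release -> centos, slackware-version -> slackware,
# lsb-release -> lsb and os-release -> os (both filtered by the ignore list)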
- match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename) - if 'name' in distro_info \ - and 'cloudlinux' in distro_info['name'].lower(): - distro_info['id'] = 'cloudlinux' - elif match: - distro_info['id'] = match.group(1) - return distro_info - else: - try: - basenames = os.listdir(_UNIXCONFDIR) - # We sort for repeatability in cases where there are multiple - # distro specific files; e.g. CentOS, Oracle, Enterprise all - # containing `redhat-release` on top of their own. - basenames.sort() - except OSError: - # This may occur when /etc is not readable but we can't be - # sure about the *-release files. Check common entries of - # /etc for information. If they turn out to not be there the - # error is handled in `_parse_distro_release_file()`. - basenames = ['SuSE-release', - 'arch-release', - 'base-release', - 'centos-release', - 'fedora-release', - 'gentoo-release', - 'mageia-release', - 'mandrake-release', - 'mandriva-release', - 'mandrivalinux-release', - 'manjaro-release', - 'oracle-release', - 'redhat-release', - 'sl-release', - 'slackware-version'] - for basename in basenames: - if basename in _DISTRO_RELEASE_IGNORE_BASENAMES: - continue - match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename) - if match: - filepath = os.path.join(_UNIXCONFDIR, basename) - distro_info = self._parse_distro_release_file(filepath) - if 'name' in distro_info: - # The name is always present if the pattern matches - self.distro_release_file = filepath - distro_info['id'] = match.group(1) - if 'cloudlinux' in distro_info['name'].lower(): - distro_info['id'] = 'cloudlinux' - return distro_info - return {} - - def _parse_distro_release_file(self, filepath): - """ - Parse a distro release file. - - Parameters: - - * filepath: Path name of the distro release file. - - Returns: - A dictionary containing all information items. - """ - try: - with open(filepath) as fp: - # Only parse the first line. For instance, on SLES there - # are multiple lines. We don't want them... - return self._parse_distro_release_content(fp.readline()) - except (OSError, IOError): - # Ignore not being able to read a specific, seemingly version - # related file. - # See https://github.com/nir0s/distro/issues/162 - return {} - - @staticmethod - def _parse_distro_release_content(line): - """ - Parse a line from a distro release file. - - Parameters: - * line: Line from the distro release file. Must be a unicode string - or a UTF-8 encoded byte string. - - Returns: - A dictionary containing all information items. 
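The reversed-string trick deserves a worked example before the implementation: reversing the line lets one regex anchor on the trailing '(codename)' and the version, whatever the distro name looks like ('esaeler' in the pattern is 'release' reversed). A sketch using the module's _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN:

line = 'CentOS Linux release 7.1.1503 (Core)'
m = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(line.strip()[::-1])
name = m.group(3)[::-1]          # 'CentOS Linux'
version_id = m.group(2)[::-1]    # '7.1.1503'
codename = m.group(1)[::-1]      # 'Core'
assert (name, version_id, codename) == ('CentOS Linux', '7.1.1503', 'Core')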
- """ - matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match( - line.strip()[::-1]) - distro_info = {} - if matches: - # regexp ensures non-None - distro_info['name'] = matches.group(3)[::-1] - if matches.group(2): - distro_info['version_id'] = matches.group(2)[::-1] - if matches.group(1): - distro_info['codename'] = matches.group(1)[::-1] - elif line: - distro_info['name'] = line.strip() - return distro_info - - -_distro = LinuxDistribution() - - -def main(): - logger = logging.getLogger(__name__) - logger.setLevel(logging.DEBUG) - logger.addHandler(logging.StreamHandler(sys.stdout)) - - parser = argparse.ArgumentParser(description="OS distro info tool") - parser.add_argument( - '--json', - '-j', - help="Output in machine readable format", - action="store_true") - args = parser.parse_args() - - if args.json: - logger.info(json.dumps(info(), indent=4, sort_keys=True)) - else: - logger.info('Name: %s', name(pretty=True)) - distribution_version = version(pretty=True) - logger.info('Version: %s', distribution_version) - distribution_codename = codename() - logger.info('Codename: %s', distribution_codename) - - -if __name__ == '__main__': - main() diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/__init__.py b/env/Lib/site-packages/pip/_vendor/html5lib/__init__.py deleted file mode 100644 index d1d82f157f884dc65160a41b436258d1aaf12e4c..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/html5lib/__init__.py +++ /dev/null @@ -1,35 +0,0 @@ -""" -HTML parsing library based on the `WHATWG HTML specification -<https://whatwg.org/html>`_. The parser is designed to be compatible with -existing HTML found in the wild and implements well-defined error recovery that -is largely compatible with modern desktop web browsers. - -Example usage:: - - from pip._vendor import html5lib - with open("my_document.html", "rb") as f: - tree = html5lib.parse(f) - -For convenience, this module re-exports the following names: - -* :func:`~.html5parser.parse` -* :func:`~.html5parser.parseFragment` -* :class:`~.html5parser.HTMLParser` -* :func:`~.treebuilders.getTreeBuilder` -* :func:`~.treewalkers.getTreeWalker` -* :func:`~.serializer.serialize` -""" - -from __future__ import absolute_import, division, unicode_literals - -from .html5parser import HTMLParser, parse, parseFragment -from .treebuilders import getTreeBuilder -from .treewalkers import getTreeWalker -from .serializer import serialize - -__all__ = ["HTMLParser", "parse", "parseFragment", "getTreeBuilder", - "getTreeWalker", "serialize"] - -# this has to be at the top level, see how setup.py parses this -#: Distribution version number. 
-__version__ = "1.1" diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/html5lib/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 2530cbe0ccc8bd73244b78c78f2dcf36e74eeb85..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/html5lib/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/__pycache__/_ihatexml.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/html5lib/__pycache__/_ihatexml.cpython-39.pyc deleted file mode 100644 index 8b19e80b849505c197723be209619848dc28f768..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/html5lib/__pycache__/_ihatexml.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/__pycache__/_inputstream.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/html5lib/__pycache__/_inputstream.cpython-39.pyc deleted file mode 100644 index 8fc47691b155fb6d6c129941004b4f0e9aec2a9f..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/html5lib/__pycache__/_inputstream.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/__pycache__/_tokenizer.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/html5lib/__pycache__/_tokenizer.cpython-39.pyc deleted file mode 100644 index c5eed21d6961fc3bf8743904ab24f7a38a99ddf6..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/html5lib/__pycache__/_tokenizer.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/__pycache__/_utils.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/html5lib/__pycache__/_utils.cpython-39.pyc deleted file mode 100644 index e3679b47f3241fbcae1d374b2e221839729b2740..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/html5lib/__pycache__/_utils.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/__pycache__/constants.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/html5lib/__pycache__/constants.cpython-39.pyc deleted file mode 100644 index 5970137cbf6ec86e81124cc85908aa737c6ae27b..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/html5lib/__pycache__/constants.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/__pycache__/html5parser.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/html5lib/__pycache__/html5parser.cpython-39.pyc deleted file mode 100644 index 1c6cea33c4a0e43345bd7b91338b40b77075b430..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/html5lib/__pycache__/html5parser.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/__pycache__/serializer.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/html5lib/__pycache__/serializer.cpython-39.pyc deleted file mode 100644 index a4d75e6ec85c5feaf571585a21b770cd58cb4677..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/html5lib/__pycache__/serializer.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/_ihatexml.py b/env/Lib/site-packages/pip/_vendor/html5lib/_ihatexml.py deleted file mode 100644 index 3ff803c195243984738c6f3f328c78d9c4999cfc..0000000000000000000000000000000000000000 --- 
a/env/Lib/site-packages/pip/_vendor/html5lib/_ihatexml.py +++ /dev/null @@ -1,289 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -import re -import warnings - -from .constants import DataLossWarning - -baseChar = """ -[#x0041-#x005A] | [#x0061-#x007A] | [#x00C0-#x00D6] | [#x00D8-#x00F6] | -[#x00F8-#x00FF] | [#x0100-#x0131] | [#x0134-#x013E] | [#x0141-#x0148] | -[#x014A-#x017E] | [#x0180-#x01C3] | [#x01CD-#x01F0] | [#x01F4-#x01F5] | -[#x01FA-#x0217] | [#x0250-#x02A8] | [#x02BB-#x02C1] | #x0386 | -[#x0388-#x038A] | #x038C | [#x038E-#x03A1] | [#x03A3-#x03CE] | -[#x03D0-#x03D6] | #x03DA | #x03DC | #x03DE | #x03E0 | [#x03E2-#x03F3] | -[#x0401-#x040C] | [#x040E-#x044F] | [#x0451-#x045C] | [#x045E-#x0481] | -[#x0490-#x04C4] | [#x04C7-#x04C8] | [#x04CB-#x04CC] | [#x04D0-#x04EB] | -[#x04EE-#x04F5] | [#x04F8-#x04F9] | [#x0531-#x0556] | #x0559 | -[#x0561-#x0586] | [#x05D0-#x05EA] | [#x05F0-#x05F2] | [#x0621-#x063A] | -[#x0641-#x064A] | [#x0671-#x06B7] | [#x06BA-#x06BE] | [#x06C0-#x06CE] | -[#x06D0-#x06D3] | #x06D5 | [#x06E5-#x06E6] | [#x0905-#x0939] | #x093D | -[#x0958-#x0961] | [#x0985-#x098C] | [#x098F-#x0990] | [#x0993-#x09A8] | -[#x09AA-#x09B0] | #x09B2 | [#x09B6-#x09B9] | [#x09DC-#x09DD] | -[#x09DF-#x09E1] | [#x09F0-#x09F1] | [#x0A05-#x0A0A] | [#x0A0F-#x0A10] | -[#x0A13-#x0A28] | [#x0A2A-#x0A30] | [#x0A32-#x0A33] | [#x0A35-#x0A36] | -[#x0A38-#x0A39] | [#x0A59-#x0A5C] | #x0A5E | [#x0A72-#x0A74] | -[#x0A85-#x0A8B] | #x0A8D | [#x0A8F-#x0A91] | [#x0A93-#x0AA8] | -[#x0AAA-#x0AB0] | [#x0AB2-#x0AB3] | [#x0AB5-#x0AB9] | #x0ABD | #x0AE0 | -[#x0B05-#x0B0C] | [#x0B0F-#x0B10] | [#x0B13-#x0B28] | [#x0B2A-#x0B30] | -[#x0B32-#x0B33] | [#x0B36-#x0B39] | #x0B3D | [#x0B5C-#x0B5D] | -[#x0B5F-#x0B61] | [#x0B85-#x0B8A] | [#x0B8E-#x0B90] | [#x0B92-#x0B95] | -[#x0B99-#x0B9A] | #x0B9C | [#x0B9E-#x0B9F] | [#x0BA3-#x0BA4] | -[#x0BA8-#x0BAA] | [#x0BAE-#x0BB5] | [#x0BB7-#x0BB9] | [#x0C05-#x0C0C] | -[#x0C0E-#x0C10] | [#x0C12-#x0C28] | [#x0C2A-#x0C33] | [#x0C35-#x0C39] | -[#x0C60-#x0C61] | [#x0C85-#x0C8C] | [#x0C8E-#x0C90] | [#x0C92-#x0CA8] | -[#x0CAA-#x0CB3] | [#x0CB5-#x0CB9] | #x0CDE | [#x0CE0-#x0CE1] | -[#x0D05-#x0D0C] | [#x0D0E-#x0D10] | [#x0D12-#x0D28] | [#x0D2A-#x0D39] | -[#x0D60-#x0D61] | [#x0E01-#x0E2E] | #x0E30 | [#x0E32-#x0E33] | -[#x0E40-#x0E45] | [#x0E81-#x0E82] | #x0E84 | [#x0E87-#x0E88] | #x0E8A | -#x0E8D | [#x0E94-#x0E97] | [#x0E99-#x0E9F] | [#x0EA1-#x0EA3] | #x0EA5 | -#x0EA7 | [#x0EAA-#x0EAB] | [#x0EAD-#x0EAE] | #x0EB0 | [#x0EB2-#x0EB3] | -#x0EBD | [#x0EC0-#x0EC4] | [#x0F40-#x0F47] | [#x0F49-#x0F69] | -[#x10A0-#x10C5] | [#x10D0-#x10F6] | #x1100 | [#x1102-#x1103] | -[#x1105-#x1107] | #x1109 | [#x110B-#x110C] | [#x110E-#x1112] | #x113C | -#x113E | #x1140 | #x114C | #x114E | #x1150 | [#x1154-#x1155] | #x1159 | -[#x115F-#x1161] | #x1163 | #x1165 | #x1167 | #x1169 | [#x116D-#x116E] | -[#x1172-#x1173] | #x1175 | #x119E | #x11A8 | #x11AB | [#x11AE-#x11AF] | -[#x11B7-#x11B8] | #x11BA | [#x11BC-#x11C2] | #x11EB | #x11F0 | #x11F9 | -[#x1E00-#x1E9B] | [#x1EA0-#x1EF9] | [#x1F00-#x1F15] | [#x1F18-#x1F1D] | -[#x1F20-#x1F45] | [#x1F48-#x1F4D] | [#x1F50-#x1F57] | #x1F59 | #x1F5B | -#x1F5D | [#x1F5F-#x1F7D] | [#x1F80-#x1FB4] | [#x1FB6-#x1FBC] | #x1FBE | -[#x1FC2-#x1FC4] | [#x1FC6-#x1FCC] | [#x1FD0-#x1FD3] | [#x1FD6-#x1FDB] | -[#x1FE0-#x1FEC] | [#x1FF2-#x1FF4] | [#x1FF6-#x1FFC] | #x2126 | -[#x212A-#x212B] | #x212E | [#x2180-#x2182] | [#x3041-#x3094] | -[#x30A1-#x30FA] | [#x3105-#x312C] | [#xAC00-#xD7A3]""" - -ideographic = """[#x4E00-#x9FA5] | #x3007 | [#x3021-#x3029]""" - -combiningCharacter = 
""" -[#x0300-#x0345] | [#x0360-#x0361] | [#x0483-#x0486] | [#x0591-#x05A1] | -[#x05A3-#x05B9] | [#x05BB-#x05BD] | #x05BF | [#x05C1-#x05C2] | #x05C4 | -[#x064B-#x0652] | #x0670 | [#x06D6-#x06DC] | [#x06DD-#x06DF] | -[#x06E0-#x06E4] | [#x06E7-#x06E8] | [#x06EA-#x06ED] | [#x0901-#x0903] | -#x093C | [#x093E-#x094C] | #x094D | [#x0951-#x0954] | [#x0962-#x0963] | -[#x0981-#x0983] | #x09BC | #x09BE | #x09BF | [#x09C0-#x09C4] | -[#x09C7-#x09C8] | [#x09CB-#x09CD] | #x09D7 | [#x09E2-#x09E3] | #x0A02 | -#x0A3C | #x0A3E | #x0A3F | [#x0A40-#x0A42] | [#x0A47-#x0A48] | -[#x0A4B-#x0A4D] | [#x0A70-#x0A71] | [#x0A81-#x0A83] | #x0ABC | -[#x0ABE-#x0AC5] | [#x0AC7-#x0AC9] | [#x0ACB-#x0ACD] | [#x0B01-#x0B03] | -#x0B3C | [#x0B3E-#x0B43] | [#x0B47-#x0B48] | [#x0B4B-#x0B4D] | -[#x0B56-#x0B57] | [#x0B82-#x0B83] | [#x0BBE-#x0BC2] | [#x0BC6-#x0BC8] | -[#x0BCA-#x0BCD] | #x0BD7 | [#x0C01-#x0C03] | [#x0C3E-#x0C44] | -[#x0C46-#x0C48] | [#x0C4A-#x0C4D] | [#x0C55-#x0C56] | [#x0C82-#x0C83] | -[#x0CBE-#x0CC4] | [#x0CC6-#x0CC8] | [#x0CCA-#x0CCD] | [#x0CD5-#x0CD6] | -[#x0D02-#x0D03] | [#x0D3E-#x0D43] | [#x0D46-#x0D48] | [#x0D4A-#x0D4D] | -#x0D57 | #x0E31 | [#x0E34-#x0E3A] | [#x0E47-#x0E4E] | #x0EB1 | -[#x0EB4-#x0EB9] | [#x0EBB-#x0EBC] | [#x0EC8-#x0ECD] | [#x0F18-#x0F19] | -#x0F35 | #x0F37 | #x0F39 | #x0F3E | #x0F3F | [#x0F71-#x0F84] | -[#x0F86-#x0F8B] | [#x0F90-#x0F95] | #x0F97 | [#x0F99-#x0FAD] | -[#x0FB1-#x0FB7] | #x0FB9 | [#x20D0-#x20DC] | #x20E1 | [#x302A-#x302F] | -#x3099 | #x309A""" - -digit = """ -[#x0030-#x0039] | [#x0660-#x0669] | [#x06F0-#x06F9] | [#x0966-#x096F] | -[#x09E6-#x09EF] | [#x0A66-#x0A6F] | [#x0AE6-#x0AEF] | [#x0B66-#x0B6F] | -[#x0BE7-#x0BEF] | [#x0C66-#x0C6F] | [#x0CE6-#x0CEF] | [#x0D66-#x0D6F] | -[#x0E50-#x0E59] | [#x0ED0-#x0ED9] | [#x0F20-#x0F29]""" - -extender = """ -#x00B7 | #x02D0 | #x02D1 | #x0387 | #x0640 | #x0E46 | #x0EC6 | #x3005 | -#[#x3031-#x3035] | [#x309D-#x309E] | [#x30FC-#x30FE]""" - -letter = " | ".join([baseChar, ideographic]) - -# Without the -name = " | ".join([letter, digit, ".", "-", "_", combiningCharacter, - extender]) -nameFirst = " | ".join([letter, "_"]) - -reChar = re.compile(r"#x([\d|A-F]{4,4})") -reCharRange = re.compile(r"\[#x([\d|A-F]{4,4})-#x([\d|A-F]{4,4})\]") - - -def charStringToList(chars): - charRanges = [item.strip() for item in chars.split(" | ")] - rv = [] - for item in charRanges: - foundMatch = False - for regexp in (reChar, reCharRange): - match = regexp.match(item) - if match is not None: - rv.append([hexToInt(item) for item in match.groups()]) - if len(rv[-1]) == 1: - rv[-1] = rv[-1] * 2 - foundMatch = True - break - if not foundMatch: - assert len(item) == 1 - - rv.append([ord(item)] * 2) - rv = normaliseCharList(rv) - return rv - - -def normaliseCharList(charList): - charList = sorted(charList) - for item in charList: - assert item[1] >= item[0] - rv = [] - i = 0 - while i < len(charList): - j = 1 - rv.append(charList[i]) - while i + j < len(charList) and charList[i + j][0] <= rv[-1][1] + 1: - rv[-1][1] = charList[i + j][1] - j += 1 - i += j - return rv - - -# We don't really support characters above the BMP :( -max_unicode = int("FFFF", 16) - - -def missingRanges(charList): - rv = [] - if charList[0] != 0: - rv.append([0, charList[0][0] - 1]) - for i, item in enumerate(charList[:-1]): - rv.append([item[1] + 1, charList[i + 1][0] - 1]) - if charList[-1][1] != max_unicode: - rv.append([charList[-1][1] + 1, max_unicode]) - return rv - - -def listToRegexpStr(charList): - rv = [] - for item in charList: - if item[0] == item[1]: - 
rv.append(escapeRegexp(chr(item[0]))) - else: - rv.append(escapeRegexp(chr(item[0])) + "-" + - escapeRegexp(chr(item[1]))) - return "[%s]" % "".join(rv) - - -def hexToInt(hex_str): - return int(hex_str, 16) - - -def escapeRegexp(string): - specialCharacters = (".", "^", "$", "*", "+", "?", "{", "}", - "[", "]", "|", "(", ")", "-") - for char in specialCharacters: - string = string.replace(char, "\\" + char) - - return string - -# output from the above -nonXmlNameBMPRegexp = re.compile('[\x00-,/:-@\\[-\\^`\\{-\xb6\xb8-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u02cf\u02d2-\u02ff\u0346-\u035f\u0362-\u0385\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482\u0487-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u0590\u05a2\u05ba\u05be\u05c0\u05c3\u05c5-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u063f\u0653-\u065f\u066a-\u066f\u06b8-\u06b9\u06bf\u06cf\u06d4\u06e9\u06ee-\u06ef\u06fa-\u0900\u0904\u093a-\u093b\u094e-\u0950\u0955-\u0957\u0964-\u0965\u0970-\u0980\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09bd\u09c5-\u09c6\u09c9-\u09ca\u09ce-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09f2-\u0a01\u0a03-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a58\u0a5d\u0a5f-\u0a65\u0a75-\u0a80\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0adf\u0ae1-\u0ae5\u0af0-\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3b\u0b44-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b62-\u0b65\u0b70-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bd6\u0bd8-\u0be6\u0bf0-\u0c00\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c3d\u0c45\u0c49\u0c4e-\u0c54\u0c57-\u0c5f\u0c62-\u0c65\u0c70-\u0c81\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbd\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce2-\u0ce5\u0cf0-\u0d01\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d3d\u0d44-\u0d45\u0d49\u0d4e-\u0d56\u0d58-\u0d5f\u0d62-\u0d65\u0d70-\u0e00\u0e2f\u0e3b-\u0e3f\u0e4f\u0e5a-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0f17\u0f1a-\u0f1f\u0f2a-\u0f34\u0f36\u0f38\u0f3a-\u0f3d\u0f48\u0f6a-\u0f70\u0f85\u0f8c-\u0f8f\u0f96\u0f98\u0fae-\u0fb0\u0fb8\u0fba-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u20cf\u20dd-\u20e0\u20e2-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3004\u3006\u3008-\u3020\u3030\u3036-\u3040\u3095-\u3098\u309b-\u309c\u309f-\u30a0\u30fb\u30ff-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]') # noqa - -nonXmlNameFirstBMPRegexp = 
re.compile('[\x00-@\\[-\\^`\\{-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u0385\u0387\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u0640\u064b-\u0670\u06b8-\u06b9\u06bf\u06cf\u06d4\u06d6-\u06e4\u06e7-\u0904\u093a-\u093c\u093e-\u0957\u0962-\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09db\u09de\u09e2-\u09ef\u09f2-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a58\u0a5d\u0a5f-\u0a71\u0a75-\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abc\u0abe-\u0adf\u0ae1-\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3c\u0b3e-\u0b5b\u0b5e\u0b62-\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c5f\u0c62-\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cdd\u0cdf\u0ce2-\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d5f\u0d62-\u0e00\u0e2f\u0e31\u0e34-\u0e3f\u0e46-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eb1\u0eb4-\u0ebc\u0ebe-\u0ebf\u0ec5-\u0f3f\u0f48\u0f6a-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3006\u3008-\u3020\u302a-\u3040\u3095-\u30a0\u30fb-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]') # noqa - -# Simpler things -nonPubidCharRegexp = re.compile("[^\x20\x0D\x0Aa-zA-Z0-9\\-'()+,./:=?;!*#@$_%]") - - -class InfosetFilter(object): - replacementRegexp = re.compile(r"U[\dA-F]{5,5}") - - def __init__(self, - dropXmlnsLocalName=False, - dropXmlnsAttrNs=False, - preventDoubleDashComments=False, - preventDashAtCommentEnd=False, - replaceFormFeedCharacters=True, - preventSingleQuotePubid=False): - - self.dropXmlnsLocalName = dropXmlnsLocalName - self.dropXmlnsAttrNs = dropXmlnsAttrNs - - self.preventDoubleDashComments = preventDoubleDashComments - self.preventDashAtCommentEnd = preventDashAtCommentEnd - - self.replaceFormFeedCharacters = replaceFormFeedCharacters - - self.preventSingleQuotePubid = preventSingleQuotePubid - - self.replaceCache = {} - - def coerceAttribute(self, name, namespace=None): - if self.dropXmlnsLocalName and name.startswith("xmlns:"): - warnings.warn("Attributes cannot begin with xmlns", DataLossWarning) - return None - elif (self.dropXmlnsAttrNs and - namespace == "http://www.w3.org/2000/xmlns/"): - warnings.warn("Attributes cannot be in the xml namespace", DataLossWarning) - return None - else: - return self.toXmlName(name) - - def coerceElement(self, name): - return self.toXmlName(name) - - def coerceComment(self, data): - if self.preventDoubleDashComments: - while "--" in data: - warnings.warn("Comments cannot contain adjacent dashes", DataLossWarning) - data = data.replace("--", "- -") - if data.endswith("-"): - warnings.warn("Comments cannot end in 
a dash", DataLossWarning) - data += " " - return data - - def coerceCharacters(self, data): - if self.replaceFormFeedCharacters: - for _ in range(data.count("\x0C")): - warnings.warn("Text cannot contain U+000C", DataLossWarning) - data = data.replace("\x0C", " ") - # Other non-xml characters - return data - - def coercePubid(self, data): - dataOutput = data - for char in nonPubidCharRegexp.findall(data): - warnings.warn("Coercing non-XML pubid", DataLossWarning) - replacement = self.getReplacementCharacter(char) - dataOutput = dataOutput.replace(char, replacement) - if self.preventSingleQuotePubid and dataOutput.find("'") >= 0: - warnings.warn("Pubid cannot contain single quote", DataLossWarning) - dataOutput = dataOutput.replace("'", self.getReplacementCharacter("'")) - return dataOutput - - def toXmlName(self, name): - nameFirst = name[0] - nameRest = name[1:] - m = nonXmlNameFirstBMPRegexp.match(nameFirst) - if m: - warnings.warn("Coercing non-XML name: %s" % name, DataLossWarning) - nameFirstOutput = self.getReplacementCharacter(nameFirst) - else: - nameFirstOutput = nameFirst - - nameRestOutput = nameRest - replaceChars = set(nonXmlNameBMPRegexp.findall(nameRest)) - for char in replaceChars: - warnings.warn("Coercing non-XML name: %s" % name, DataLossWarning) - replacement = self.getReplacementCharacter(char) - nameRestOutput = nameRestOutput.replace(char, replacement) - return nameFirstOutput + nameRestOutput - - def getReplacementCharacter(self, char): - if char in self.replaceCache: - replacement = self.replaceCache[char] - else: - replacement = self.escapeChar(char) - return replacement - - def fromXmlName(self, name): - for item in set(self.replacementRegexp.findall(name)): - name = name.replace(item, self.unescapeChar(item)) - return name - - def escapeChar(self, char): - replacement = "U%05X" % ord(char) - self.replaceCache[char] = replacement - return replacement - - def unescapeChar(self, charcode): - return chr(int(charcode[1:], 16)) diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/_inputstream.py b/env/Lib/site-packages/pip/_vendor/html5lib/_inputstream.py deleted file mode 100644 index e0bb37602c8e2f1f808ba8fdcb1b7f63451fa4f5..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/html5lib/_inputstream.py +++ /dev/null @@ -1,918 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -from pip._vendor.six import text_type -from pip._vendor.six.moves import http_client, urllib - -import codecs -import re -from io import BytesIO, StringIO - -from pip._vendor import webencodings - -from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase -from .constants import _ReparseException -from . 
import _utils - -# Non-unicode versions of constants for use in the pre-parser -spaceCharactersBytes = frozenset([item.encode("ascii") for item in spaceCharacters]) -asciiLettersBytes = frozenset([item.encode("ascii") for item in asciiLetters]) -asciiUppercaseBytes = frozenset([item.encode("ascii") for item in asciiUppercase]) -spacesAngleBrackets = spaceCharactersBytes | frozenset([b">", b"<"]) - - -invalid_unicode_no_surrogate = "[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]" # noqa - -if _utils.supports_lone_surrogates: - # Use one extra step of indirection and create surrogates with - # eval. Not using this indirection would introduce an illegal - # unicode literal on platforms not supporting such lone - # surrogates. - assert invalid_unicode_no_surrogate[-1] == "]" and invalid_unicode_no_surrogate.count("]") == 1 - invalid_unicode_re = re.compile(invalid_unicode_no_surrogate[:-1] + - eval('"\\uD800-\\uDFFF"') + # pylint:disable=eval-used - "]") -else: - invalid_unicode_re = re.compile(invalid_unicode_no_surrogate) - -non_bmp_invalid_codepoints = {0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE, - 0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF, - 0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE, - 0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF, - 0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE, - 0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF, - 0x10FFFE, 0x10FFFF} - -ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005C\u005B-\u0060\u007B-\u007E]") - -# Cache for charsUntil() -charsUntilRegEx = {} - - -class BufferedStream(object): - """Buffering for streams that do not have buffering of their own - - The buffer is implemented as a list of chunks on the assumption that - joining many strings will be slow since it is O(n**2) - """ - - def __init__(self, stream): - self.stream = stream - self.buffer = [] - self.position = [-1, 0] # chunk number, offset - - def tell(self): - pos = 0 - for chunk in self.buffer[:self.position[0]]: - pos += len(chunk) - pos += self.position[1] - return pos - - def seek(self, pos): - assert pos <= self._bufferedBytes() - offset = pos - i = 0 - while len(self.buffer[i]) < offset: - offset -= len(self.buffer[i]) - i += 1 - self.position = [i, offset] - - def read(self, bytes): - if not self.buffer: - return self._readStream(bytes) - elif (self.position[0] == len(self.buffer) and - self.position[1] == len(self.buffer[-1])): - return self._readStream(bytes) - else: - return self._readFromBuffer(bytes) - - def _bufferedBytes(self): - return sum([len(item) for item in self.buffer]) - - def _readStream(self, bytes): - data = self.stream.read(bytes) - self.buffer.append(data) - self.position[0] += 1 - self.position[1] = len(data) - return data - - def _readFromBuffer(self, bytes): - remainingBytes = bytes - rv = [] - bufferIndex = self.position[0] - bufferOffset = self.position[1] - while bufferIndex < len(self.buffer) and remainingBytes != 0: - assert remainingBytes > 0 - bufferedData = self.buffer[bufferIndex] - - if remainingBytes <= len(bufferedData) - bufferOffset: - bytesToRead = remainingBytes - self.position = [bufferIndex, bufferOffset + bytesToRead] - else: - bytesToRead = len(bufferedData) - bufferOffset - 
self.position = [bufferIndex, len(bufferedData)] - bufferIndex += 1 - rv.append(bufferedData[bufferOffset:bufferOffset + bytesToRead]) - remainingBytes -= bytesToRead - - bufferOffset = 0 - - if remainingBytes: - rv.append(self._readStream(remainingBytes)) - - return b"".join(rv) - - -def HTMLInputStream(source, **kwargs): - # Work around Python bug #20007: read(0) closes the connection. - # http://bugs.python.org/issue20007 - if (isinstance(source, http_client.HTTPResponse) or - # Also check for addinfourl wrapping HTTPResponse - (isinstance(source, urllib.response.addbase) and - isinstance(source.fp, http_client.HTTPResponse))): - isUnicode = False - elif hasattr(source, "read"): - isUnicode = isinstance(source.read(0), text_type) - else: - isUnicode = isinstance(source, text_type) - - if isUnicode: - encodings = [x for x in kwargs if x.endswith("_encoding")] - if encodings: - raise TypeError("Cannot set an encoding with a unicode input, set %r" % encodings) - - return HTMLUnicodeInputStream(source, **kwargs) - else: - return HTMLBinaryInputStream(source, **kwargs) - - -class HTMLUnicodeInputStream(object): - """Provides a unicode stream of characters to the HTMLTokenizer. - - This class takes care of character encoding and removing or replacing - incorrect byte-sequences and also provides column and line tracking. - - """ - - _defaultChunkSize = 10240 - - def __init__(self, source): - """Initialises the HTMLInputStream. - - HTMLInputStream(source, [encoding]) -> Normalized stream from source - for use by html5lib. - - source can be either a file-object, local filename or a string. - - The optional encoding parameter must be a string that indicates - the encoding. If specified, that encoding will be used, - regardless of any BOM or later declaration (such as in a meta - element) - - """ - - if not _utils.supports_lone_surrogates: - # Such platforms will have already checked for such - # surrogate errors, so no need to do this checking. - self.reportCharacterErrors = None - elif len("\U0010FFFF") == 1: - self.reportCharacterErrors = self.characterErrorsUCS4 - else: - self.reportCharacterErrors = self.characterErrorsUCS2 - - # List of where new lines occur - self.newLines = [0] - - self.charEncoding = (lookupEncoding("utf-8"), "certain") - self.dataStream = self.openStream(source) - - self.reset() - - def reset(self): - self.chunk = "" - self.chunkSize = 0 - self.chunkOffset = 0 - self.errors = [] - - # number of (complete) lines in previous chunks - self.prevNumLines = 0 - # number of columns in the last line of the previous chunk - self.prevNumCols = 0 - - # Deal with CR LF and surrogates split over chunk boundaries - self._bufferedCharacter = None - - def openStream(self, source): - """Produces a file object from source. - - source can be either a file object, local filename or a string. 
- - """ - # Already a file object - if hasattr(source, 'read'): - stream = source - else: - stream = StringIO(source) - - return stream - - def _position(self, offset): - chunk = self.chunk - nLines = chunk.count('\n', 0, offset) - positionLine = self.prevNumLines + nLines - lastLinePos = chunk.rfind('\n', 0, offset) - if lastLinePos == -1: - positionColumn = self.prevNumCols + offset - else: - positionColumn = offset - (lastLinePos + 1) - return (positionLine, positionColumn) - - def position(self): - """Returns (line, col) of the current position in the stream.""" - line, col = self._position(self.chunkOffset) - return (line + 1, col) - - def char(self): - """ Read one character from the stream or queue if available. Return - EOF when EOF is reached. - """ - # Read a new chunk from the input stream if necessary - if self.chunkOffset >= self.chunkSize: - if not self.readChunk(): - return EOF - - chunkOffset = self.chunkOffset - char = self.chunk[chunkOffset] - self.chunkOffset = chunkOffset + 1 - - return char - - def readChunk(self, chunkSize=None): - if chunkSize is None: - chunkSize = self._defaultChunkSize - - self.prevNumLines, self.prevNumCols = self._position(self.chunkSize) - - self.chunk = "" - self.chunkSize = 0 - self.chunkOffset = 0 - - data = self.dataStream.read(chunkSize) - - # Deal with CR LF and surrogates broken across chunks - if self._bufferedCharacter: - data = self._bufferedCharacter + data - self._bufferedCharacter = None - elif not data: - # We have no more data, bye-bye stream - return False - - if len(data) > 1: - lastv = ord(data[-1]) - if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF: - self._bufferedCharacter = data[-1] - data = data[:-1] - - if self.reportCharacterErrors: - self.reportCharacterErrors(data) - - # Replace invalid characters - data = data.replace("\r\n", "\n") - data = data.replace("\r", "\n") - - self.chunk = data - self.chunkSize = len(data) - - return True - - def characterErrorsUCS4(self, data): - for _ in range(len(invalid_unicode_re.findall(data))): - self.errors.append("invalid-codepoint") - - def characterErrorsUCS2(self, data): - # Someone picked the wrong compile option - # You lose - skip = False - for match in invalid_unicode_re.finditer(data): - if skip: - continue - codepoint = ord(match.group()) - pos = match.start() - # Pretty sure there should be endianness issues here - if _utils.isSurrogatePair(data[pos:pos + 2]): - # We have a surrogate pair! - char_val = _utils.surrogatePairToCodepoint(data[pos:pos + 2]) - if char_val in non_bmp_invalid_codepoints: - self.errors.append("invalid-codepoint") - skip = True - elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and - pos == len(data) - 1): - self.errors.append("invalid-codepoint") - else: - skip = False - self.errors.append("invalid-codepoint") - - def charsUntil(self, characters, opposite=False): - """ Returns a string of characters from the stream up to but not - including any character in 'characters' or EOF. 'characters' must be - a container that supports the 'in' method and iteration over its - characters. 
- """ - - # Use a cache of regexps to find the required characters - try: - chars = charsUntilRegEx[(characters, opposite)] - except KeyError: - if __debug__: - for c in characters: - assert(ord(c) < 128) - regex = "".join(["\\x%02x" % ord(c) for c in characters]) - if not opposite: - regex = "^%s" % regex - chars = charsUntilRegEx[(characters, opposite)] = re.compile("[%s]+" % regex) - - rv = [] - - while True: - # Find the longest matching prefix - m = chars.match(self.chunk, self.chunkOffset) - if m is None: - # If nothing matched, and it wasn't because we ran out of chunk, - # then stop - if self.chunkOffset != self.chunkSize: - break - else: - end = m.end() - # If not the whole chunk matched, return everything - # up to the part that didn't match - if end != self.chunkSize: - rv.append(self.chunk[self.chunkOffset:end]) - self.chunkOffset = end - break - # If the whole remainder of the chunk matched, - # use it all and read the next chunk - rv.append(self.chunk[self.chunkOffset:]) - if not self.readChunk(): - # Reached EOF - break - - r = "".join(rv) - return r - - def unget(self, char): - # Only one character is allowed to be ungotten at once - it must - # be consumed again before any further call to unget - if char is not EOF: - if self.chunkOffset == 0: - # unget is called quite rarely, so it's a good idea to do - # more work here if it saves a bit of work in the frequently - # called char and charsUntil. - # So, just prepend the ungotten character onto the current - # chunk: - self.chunk = char + self.chunk - self.chunkSize += 1 - else: - self.chunkOffset -= 1 - assert self.chunk[self.chunkOffset] == char - - -class HTMLBinaryInputStream(HTMLUnicodeInputStream): - """Provides a unicode stream of characters to the HTMLTokenizer. - - This class takes care of character encoding and removing or replacing - incorrect byte-sequences and also provides column and line tracking. - - """ - - def __init__(self, source, override_encoding=None, transport_encoding=None, - same_origin_parent_encoding=None, likely_encoding=None, - default_encoding="windows-1252", useChardet=True): - """Initialises the HTMLInputStream. - - HTMLInputStream(source, [encoding]) -> Normalized stream from source - for use by html5lib. - - source can be either a file-object, local filename or a string. - - The optional encoding parameter must be a string that indicates - the encoding. 
If specified, that encoding will be used, - regardless of any BOM or later declaration (such as in a meta - element) - - """ - # Raw Stream - for unicode objects this will encode to utf-8 and set - # self.charEncoding as appropriate - self.rawStream = self.openStream(source) - - HTMLUnicodeInputStream.__init__(self, self.rawStream) - - # Encoding Information - # Number of bytes to use when looking for a meta element with - # encoding information - self.numBytesMeta = 1024 - # Number of bytes to use when using detecting encoding using chardet - self.numBytesChardet = 100 - # Things from args - self.override_encoding = override_encoding - self.transport_encoding = transport_encoding - self.same_origin_parent_encoding = same_origin_parent_encoding - self.likely_encoding = likely_encoding - self.default_encoding = default_encoding - - # Determine encoding - self.charEncoding = self.determineEncoding(useChardet) - assert self.charEncoding[0] is not None - - # Call superclass - self.reset() - - def reset(self): - self.dataStream = self.charEncoding[0].codec_info.streamreader(self.rawStream, 'replace') - HTMLUnicodeInputStream.reset(self) - - def openStream(self, source): - """Produces a file object from source. - - source can be either a file object, local filename or a string. - - """ - # Already a file object - if hasattr(source, 'read'): - stream = source - else: - stream = BytesIO(source) - - try: - stream.seek(stream.tell()) - except Exception: - stream = BufferedStream(stream) - - return stream - - def determineEncoding(self, chardet=True): - # BOMs take precedence over everything - # This will also read past the BOM if present - charEncoding = self.detectBOM(), "certain" - if charEncoding[0] is not None: - return charEncoding - - # If we've been overridden, we've been overridden - charEncoding = lookupEncoding(self.override_encoding), "certain" - if charEncoding[0] is not None: - return charEncoding - - # Now check the transport layer - charEncoding = lookupEncoding(self.transport_encoding), "certain" - if charEncoding[0] is not None: - return charEncoding - - # Look for meta elements with encoding information - charEncoding = self.detectEncodingMeta(), "tentative" - if charEncoding[0] is not None: - return charEncoding - - # Parent document encoding - charEncoding = lookupEncoding(self.same_origin_parent_encoding), "tentative" - if charEncoding[0] is not None and not charEncoding[0].name.startswith("utf-16"): - return charEncoding - - # "likely" encoding - charEncoding = lookupEncoding(self.likely_encoding), "tentative" - if charEncoding[0] is not None: - return charEncoding - - # Guess with chardet, if available - if chardet: - try: - from pip._vendor.chardet.universaldetector import UniversalDetector - except ImportError: - pass - else: - buffers = [] - detector = UniversalDetector() - while not detector.done: - buffer = self.rawStream.read(self.numBytesChardet) - assert isinstance(buffer, bytes) - if not buffer: - break - buffers.append(buffer) - detector.feed(buffer) - detector.close() - encoding = lookupEncoding(detector.result['encoding']) - self.rawStream.seek(0) - if encoding is not None: - return encoding, "tentative" - - # Try the default encoding - charEncoding = lookupEncoding(self.default_encoding), "tentative" - if charEncoding[0] is not None: - return charEncoding - - # Fallback to html5lib's default if even that hasn't worked - return lookupEncoding("windows-1252"), "tentative" - - def changeEncoding(self, newEncoding): - assert self.charEncoding[1] != "certain" - 
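The precedence order implemented by determineEncoding above is easier to see stripped of the class machinery. Below is a minimal sketch of the same cascade; `determine_encoding` and `lookup` are hypothetical names (the real code uses `lookupEncoding` backed by webencodings, and also tries chardet, which is omitted here):

    # Sketch of the encoding-detection cascade from determineEncoding:
    # BOM, override and transport encodings are "certain"; everything
    # from the <meta> prescan onwards is only "tentative".
    import codecs

    def lookup(name):
        # Stand-in for lookupEncoding(): normalized codec name or None.
        try:
            return codecs.lookup(name).name if name else None
        except LookupError:
            return None

    def determine_encoding(bom=None, override=None, transport=None,
                           meta=None, parent=None, likely=None,
                           default="windows-1252"):
        for name, confidence in ((bom, "certain"),
                                 (override, "certain"),
                                 (transport, "certain"),
                                 (meta, "tentative"),
                                 (parent, "tentative"),
                                 (likely, "tentative"),
                                 (default, "tentative")):
            enc = lookup(name)
            if enc is not None:
                # The parent-document encoding is additionally rejected
                # when it is UTF-16, mirroring the check in the original.
                if name is parent and enc.startswith("utf-16"):
                    continue
                return enc, confidence
        return lookup("windows-1252"), "tentative"

    print(determine_encoding(transport="UTF-8"))  # ('utf-8', 'certain')
    print(determine_encoding(meta="latin-1"))     # ('iso8859-1', 'tentative')

The first source that yields a real codec wins outright, which is why changeEncoding only ever fires while the current confidence is still "tentative".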
newEncoding = lookupEncoding(newEncoding) - if newEncoding is None: - return - if newEncoding.name in ("utf-16be", "utf-16le"): - newEncoding = lookupEncoding("utf-8") - assert newEncoding is not None - elif newEncoding == self.charEncoding[0]: - self.charEncoding = (self.charEncoding[0], "certain") - else: - self.rawStream.seek(0) - self.charEncoding = (newEncoding, "certain") - self.reset() - raise _ReparseException("Encoding changed from %s to %s" % (self.charEncoding[0], newEncoding)) - - def detectBOM(self): - """Attempts to detect at BOM at the start of the stream. If - an encoding can be determined from the BOM return the name of the - encoding otherwise return None""" - bomDict = { - codecs.BOM_UTF8: 'utf-8', - codecs.BOM_UTF16_LE: 'utf-16le', codecs.BOM_UTF16_BE: 'utf-16be', - codecs.BOM_UTF32_LE: 'utf-32le', codecs.BOM_UTF32_BE: 'utf-32be' - } - - # Go to beginning of file and read in 4 bytes - string = self.rawStream.read(4) - assert isinstance(string, bytes) - - # Try detecting the BOM using bytes from the string - encoding = bomDict.get(string[:3]) # UTF-8 - seek = 3 - if not encoding: - # Need to detect UTF-32 before UTF-16 - encoding = bomDict.get(string) # UTF-32 - seek = 4 - if not encoding: - encoding = bomDict.get(string[:2]) # UTF-16 - seek = 2 - - # Set the read position past the BOM if one was found, otherwise - # set it to the start of the stream - if encoding: - self.rawStream.seek(seek) - return lookupEncoding(encoding) - else: - self.rawStream.seek(0) - return None - - def detectEncodingMeta(self): - """Report the encoding declared by the meta element - """ - buffer = self.rawStream.read(self.numBytesMeta) - assert isinstance(buffer, bytes) - parser = EncodingParser(buffer) - self.rawStream.seek(0) - encoding = parser.getEncoding() - - if encoding is not None and encoding.name in ("utf-16be", "utf-16le"): - encoding = lookupEncoding("utf-8") - - return encoding - - -class EncodingBytes(bytes): - """String-like object with an associated position and various extra methods - If the position is ever greater than the string length then an exception is - raised""" - def __new__(self, value): - assert isinstance(value, bytes) - return bytes.__new__(self, value.lower()) - - def __init__(self, value): - # pylint:disable=unused-argument - self._position = -1 - - def __iter__(self): - return self - - def __next__(self): - p = self._position = self._position + 1 - if p >= len(self): - raise StopIteration - elif p < 0: - raise TypeError - return self[p:p + 1] - - def next(self): - # Py2 compat - return self.__next__() - - def previous(self): - p = self._position - if p >= len(self): - raise StopIteration - elif p < 0: - raise TypeError - self._position = p = p - 1 - return self[p:p + 1] - - def setPosition(self, position): - if self._position >= len(self): - raise StopIteration - self._position = position - - def getPosition(self): - if self._position >= len(self): - raise StopIteration - if self._position >= 0: - return self._position - else: - return None - - position = property(getPosition, setPosition) - - def getCurrentByte(self): - return self[self.position:self.position + 1] - - currentByte = property(getCurrentByte) - - def skip(self, chars=spaceCharactersBytes): - """Skip past a list of characters""" - p = self.position # use property for the error-checking - while p < len(self): - c = self[p:p + 1] - if c not in chars: - self._position = p - return c - p += 1 - self._position = p - return None - - def skipUntil(self, chars): - p = self.position - while p < len(self): 
- c = self[p:p + 1] - if c in chars: - self._position = p - return c - p += 1 - self._position = p - return None - - def matchBytes(self, bytes): - """Look for a sequence of bytes at the start of a string. If the bytes - are found return True and advance the position to the byte after the - match. Otherwise return False and leave the position alone""" - rv = self.startswith(bytes, self.position) - if rv: - self.position += len(bytes) - return rv - - def jumpTo(self, bytes): - """Look for the next sequence of bytes matching a given sequence. If - a match is found advance the position to the last byte of the match""" - try: - self._position = self.index(bytes, self.position) + len(bytes) - 1 - except ValueError: - raise StopIteration - return True - - -class EncodingParser(object): - """Mini parser for detecting character encoding from meta elements""" - - def __init__(self, data): - """string - the data to work on for encoding detection""" - self.data = EncodingBytes(data) - self.encoding = None - - def getEncoding(self): - if b"<meta" not in self.data: - return None - - methodDispatch = ( - (b"<!--", self.handleComment), - (b"<meta", self.handleMeta), - (b"</", self.handlePossibleEndTag), - (b"<!", self.handleOther), - (b"<?", self.handleOther), - (b"<", self.handlePossibleStartTag)) - for _ in self.data: - keepParsing = True - try: - self.data.jumpTo(b"<") - except StopIteration: - break - for key, method in methodDispatch: - if self.data.matchBytes(key): - try: - keepParsing = method() - break - except StopIteration: - keepParsing = False - break - if not keepParsing: - break - - return self.encoding - - def handleComment(self): - """Skip over comments""" - return self.data.jumpTo(b"-->") - - def handleMeta(self): - if self.data.currentByte not in spaceCharactersBytes: - # if we have <meta not followed by a space so just keep going - return True - # We have a valid meta element we want to search for attributes - hasPragma = False - pendingEncoding = None - while True: - # Try to find the next attribute after the current position - attr = self.getAttribute() - if attr is None: - return True - else: - if attr[0] == b"http-equiv": - hasPragma = attr[1] == b"content-type" - if hasPragma and pendingEncoding is not None: - self.encoding = pendingEncoding - return False - elif attr[0] == b"charset": - tentativeEncoding = attr[1] - codec = lookupEncoding(tentativeEncoding) - if codec is not None: - self.encoding = codec - return False - elif attr[0] == b"content": - contentParser = ContentAttrParser(EncodingBytes(attr[1])) - tentativeEncoding = contentParser.parse() - if tentativeEncoding is not None: - codec = lookupEncoding(tentativeEncoding) - if codec is not None: - if hasPragma: - self.encoding = codec - return False - else: - pendingEncoding = codec - - def handlePossibleStartTag(self): - return self.handlePossibleTag(False) - - def handlePossibleEndTag(self): - next(self.data) - return self.handlePossibleTag(True) - - def handlePossibleTag(self, endTag): - data = self.data - if data.currentByte not in asciiLettersBytes: - # If the next byte is not an ascii letter either ignore this - # fragment (possible start tag case) or treat it according to - # handleOther - if endTag: - data.previous() - self.handleOther() - return True - - c = data.skipUntil(spacesAngleBrackets) - if c == b"<": - # return to the first step in the overall "two step" algorithm - # reprocessing the < byte - data.previous() - else: - # Read all attributes - attr = self.getAttribute() - while attr is not None: - attr = 
self.getAttribute() - return True - - def handleOther(self): - return self.data.jumpTo(b">") - - def getAttribute(self): - """Return a name,value pair for the next attribute in the stream, - if one is found, or None""" - data = self.data - # Step 1 (skip chars) - c = data.skip(spaceCharactersBytes | frozenset([b"/"])) - assert c is None or len(c) == 1 - # Step 2 - if c in (b">", None): - return None - # Step 3 - attrName = [] - attrValue = [] - # Step 4 attribute name - while True: - if c == b"=" and attrName: - break - elif c in spaceCharactersBytes: - # Step 6! - c = data.skip() - break - elif c in (b"/", b">"): - return b"".join(attrName), b"" - elif c in asciiUppercaseBytes: - attrName.append(c.lower()) - elif c is None: - return None - else: - attrName.append(c) - # Step 5 - c = next(data) - # Step 7 - if c != b"=": - data.previous() - return b"".join(attrName), b"" - # Step 8 - next(data) - # Step 9 - c = data.skip() - # Step 10 - if c in (b"'", b'"'): - # 10.1 - quoteChar = c - while True: - # 10.2 - c = next(data) - # 10.3 - if c == quoteChar: - next(data) - return b"".join(attrName), b"".join(attrValue) - # 10.4 - elif c in asciiUppercaseBytes: - attrValue.append(c.lower()) - # 10.5 - else: - attrValue.append(c) - elif c == b">": - return b"".join(attrName), b"" - elif c in asciiUppercaseBytes: - attrValue.append(c.lower()) - elif c is None: - return None - else: - attrValue.append(c) - # Step 11 - while True: - c = next(data) - if c in spacesAngleBrackets: - return b"".join(attrName), b"".join(attrValue) - elif c in asciiUppercaseBytes: - attrValue.append(c.lower()) - elif c is None: - return None - else: - attrValue.append(c) - - -class ContentAttrParser(object): - def __init__(self, data): - assert isinstance(data, bytes) - self.data = data - - def parse(self): - try: - # Check if the attr name is charset - # otherwise return - self.data.jumpTo(b"charset") - self.data.position += 1 - self.data.skip() - if not self.data.currentByte == b"=": - # If there is no = sign keep looking for attrs - return None - self.data.position += 1 - self.data.skip() - # Look for an encoding between matching quote marks - if self.data.currentByte in (b'"', b"'"): - quoteMark = self.data.currentByte - self.data.position += 1 - oldPosition = self.data.position - if self.data.jumpTo(quoteMark): - return self.data[oldPosition:self.data.position] - else: - return None - else: - # Unquoted value - oldPosition = self.data.position - try: - self.data.skipUntil(spaceCharactersBytes) - return self.data[oldPosition:self.data.position] - except StopIteration: - # Return the whole remaining value - return self.data[oldPosition:] - except StopIteration: - return None - - -def lookupEncoding(encoding): - """Return the python codec name corresponding to an encoding or None if the - string doesn't correspond to a valid encoding.""" - if isinstance(encoding, bytes): - try: - encoding = encoding.decode("ascii") - except UnicodeDecodeError: - return None - - if encoding is not None: - try: - return webencodings.lookup(encoding) - except AttributeError: - return None - else: - return None diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/_tokenizer.py b/env/Lib/site-packages/pip/_vendor/html5lib/_tokenizer.py deleted file mode 100644 index 5f00253e2f67b6f438451bb907480d06ec6c094e..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/html5lib/_tokenizer.py +++ /dev/null @@ -1,1735 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -from pip._vendor.six import 
unichr as chr - -from collections import deque, OrderedDict -from sys import version_info - -from .constants import spaceCharacters -from .constants import entities -from .constants import asciiLetters, asciiUpper2Lower -from .constants import digits, hexDigits, EOF -from .constants import tokenTypes, tagTokenTypes -from .constants import replacementCharacters - -from ._inputstream import HTMLInputStream - -from ._trie import Trie - -entitiesTrie = Trie(entities) - -if version_info >= (3, 7): - attributeMap = dict -else: - attributeMap = OrderedDict - - -class HTMLTokenizer(object): - """ This class takes care of tokenizing HTML. - - * self.currentToken - Holds the token that is currently being processed. - - * self.state - Holds a reference to the method to be invoked... XXX - - * self.stream - Points to HTMLInputStream object. - """ - - def __init__(self, stream, parser=None, **kwargs): - - self.stream = HTMLInputStream(stream, **kwargs) - self.parser = parser - - # Setup the initial tokenizer state - self.escapeFlag = False - self.lastFourChars = [] - self.state = self.dataState - self.escape = False - - # The current token being created - self.currentToken = None - super(HTMLTokenizer, self).__init__() - - def __iter__(self): - """ This is where the magic happens. - - We do our usually processing through the states and when we have a token - to return we yield the token which pauses processing until the next token - is requested. - """ - self.tokenQueue = deque([]) - # Start processing. When EOF is reached self.state will return False - # instead of True and the loop will terminate. - while self.state(): - while self.stream.errors: - yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)} - while self.tokenQueue: - yield self.tokenQueue.popleft() - - def consumeNumberEntity(self, isHex): - """This function returns either U+FFFD or the character based on the - decimal or hexadecimal representation. It also discards ";" if present. - If not present self.tokenQueue.append({"type": tokenTypes["ParseError"]}) is invoked. - """ - - allowed = digits - radix = 10 - if isHex: - allowed = hexDigits - radix = 16 - - charStack = [] - - # Consume all the characters that are in range while making sure we - # don't hit an EOF. - c = self.stream.char() - while c in allowed and c is not EOF: - charStack.append(c) - c = self.stream.char() - - # Convert the set of characters consumed to an int. - charAsInt = int("".join(charStack), radix) - - # Certain characters get replaced with others - if charAsInt in replacementCharacters: - char = replacementCharacters[charAsInt] - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "illegal-codepoint-for-numeric-entity", - "datavars": {"charAsInt": charAsInt}}) - elif ((0xD800 <= charAsInt <= 0xDFFF) or - (charAsInt > 0x10FFFF)): - char = "\uFFFD" - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "illegal-codepoint-for-numeric-entity", - "datavars": {"charAsInt": charAsInt}}) - else: - # Should speed up this check somehow (e.g. 
move the set to a constant) - if ((0x0001 <= charAsInt <= 0x0008) or - (0x000E <= charAsInt <= 0x001F) or - (0x007F <= charAsInt <= 0x009F) or - (0xFDD0 <= charAsInt <= 0xFDEF) or - charAsInt in frozenset([0x000B, 0xFFFE, 0xFFFF, 0x1FFFE, - 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE, - 0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, - 0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE, - 0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE, - 0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE, - 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE, - 0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, - 0xFFFFF, 0x10FFFE, 0x10FFFF])): - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": - "illegal-codepoint-for-numeric-entity", - "datavars": {"charAsInt": charAsInt}}) - try: - # Try/except needed as UCS-2 Python builds' unichar only works - # within the BMP. - char = chr(charAsInt) - except ValueError: - v = charAsInt - 0x10000 - char = chr(0xD800 | (v >> 10)) + chr(0xDC00 | (v & 0x3FF)) - - # Discard the ; if present. Otherwise, put it back on the queue and - # invoke parseError on parser. - if c != ";": - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "numeric-entity-without-semicolon"}) - self.stream.unget(c) - - return char - - def consumeEntity(self, allowedChar=None, fromAttribute=False): - # Initialise to the default output for when no entity is matched - output = "&" - - charStack = [self.stream.char()] - if (charStack[0] in spaceCharacters or charStack[0] in (EOF, "<", "&") or - (allowedChar is not None and allowedChar == charStack[0])): - self.stream.unget(charStack[0]) - - elif charStack[0] == "#": - # Read the next character to see if it's hex or decimal - hex = False - charStack.append(self.stream.char()) - if charStack[-1] in ("x", "X"): - hex = True - charStack.append(self.stream.char()) - - # charStack[-1] should be the first digit - if (hex and charStack[-1] in hexDigits) \ - or (not hex and charStack[-1] in digits): - # At least one digit found, so consume the whole number - self.stream.unget(charStack[-1]) - output = self.consumeNumberEntity(hex) - else: - # No digits found - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "expected-numeric-entity"}) - self.stream.unget(charStack.pop()) - output = "&" + "".join(charStack) - - else: - # At this point in the process might have named entity. Entities - # are stored in the global variable "entities". - # - # Consume characters and compare to these to a substring of the - # entity names in the list until the substring no longer matches. - while (charStack[-1] is not EOF): - if not entitiesTrie.has_keys_with_prefix("".join(charStack)): - break - charStack.append(self.stream.char()) - - # At this point we have a string that starts with some characters - # that may match an entity - # Try to find the longest entity the string will match to take care - # of ¬i for instance. 
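The comment above refers to the `&noti` case (the dump has rendered it as "¬i" because the entity itself got decoded). The longest-prefix lookup it describes can be reproduced with a plain dict in place of the Trie; `match_entity` below is a hypothetical helper using a tiny subset of the real entity table:

    # Sketch of longest-entity matching, dict instead of entitiesTrie.
    entities = {"not;": "\u00ac", "not": "\u00ac", "notin;": "\u2209"}

    def match_entity(chars):
        # Mirror entitiesTrie.longest_prefix(): take the longest key that
        # is a prefix of the consumed characters. "noti" matches "not",
        # leaving "i" to be re-emitted as ordinary character data.
        for end in range(len(chars), 0, -1):
            if chars[:end] in entities:
                return chars[:end], entities[chars[:end]], chars[end:]
        return None, None, chars

    print(match_entity("notin;"))  # ('notin;', '∉', '')
    print(match_entity("noti"))    # ('not', '¬', 'i')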
- try: - entityName = entitiesTrie.longest_prefix("".join(charStack[:-1])) - entityLength = len(entityName) - except KeyError: - entityName = None - - if entityName is not None: - if entityName[-1] != ";": - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "named-entity-without-semicolon"}) - if (entityName[-1] != ";" and fromAttribute and - (charStack[entityLength] in asciiLetters or - charStack[entityLength] in digits or - charStack[entityLength] == "=")): - self.stream.unget(charStack.pop()) - output = "&" + "".join(charStack) - else: - output = entities[entityName] - self.stream.unget(charStack.pop()) - output += "".join(charStack[entityLength:]) - else: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "expected-named-entity"}) - self.stream.unget(charStack.pop()) - output = "&" + "".join(charStack) - - if fromAttribute: - self.currentToken["data"][-1][1] += output - else: - if output in spaceCharacters: - tokenType = "SpaceCharacters" - else: - tokenType = "Characters" - self.tokenQueue.append({"type": tokenTypes[tokenType], "data": output}) - - def processEntityInAttribute(self, allowedChar): - """This method replaces the need for "entityInAttributeValueState". - """ - self.consumeEntity(allowedChar=allowedChar, fromAttribute=True) - - def emitCurrentToken(self): - """This method is a generic handler for emitting the tags. It also sets - the state to "data" because that's what's needed after a token has been - emitted. - """ - token = self.currentToken - # Add token to the queue to be yielded - if (token["type"] in tagTokenTypes): - token["name"] = token["name"].translate(asciiUpper2Lower) - if token["type"] == tokenTypes["StartTag"]: - raw = token["data"] - data = attributeMap(raw) - if len(raw) > len(data): - # we had some duplicated attribute, fix so first wins - data.update(raw[::-1]) - token["data"] = data - - if token["type"] == tokenTypes["EndTag"]: - if token["data"]: - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "attributes-in-end-tag"}) - if token["selfClosing"]: - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "self-closing-flag-on-end-tag"}) - self.tokenQueue.append(token) - self.state = self.dataState - - # Below are the various tokenizer states worked out. - def dataState(self): - data = self.stream.char() - if data == "&": - self.state = self.entityDataState - elif data == "<": - self.state = self.tagOpenState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.tokenQueue.append({"type": tokenTypes["Characters"], - "data": "\u0000"}) - elif data is EOF: - # Tokenization ends. - return False - elif data in spaceCharacters: - # Directly after emitting a token you switch back to the "data - # state". At that point spaceCharacters are important so they are - # emitted separately. 
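The duplicate-attribute fix in emitCurrentToken is worth unpacking, since it relies on a dict-construction detail: building a dict from the attribute list keeps the *last* value for a repeated key, so re-applying the pairs in reverse order puts the *first* occurrence back on top. A standalone sketch of exactly that trick:

    # dict(raw) keeps the last value per key; updating with the reversed
    # pairs makes the first occurrence win, as the HTML spec requires.
    raw = [("class", "a"), ("id", "x"), ("class", "b")]

    data = dict(raw)            # {'class': 'b', 'id': 'x'} -- last wins
    if len(raw) > len(data):    # a key was duplicated
        data.update(raw[::-1])  # first occurrence is reapplied last
    print(data)                 # {'class': 'a', 'id': 'x'}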
- self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data": - data + self.stream.charsUntil(spaceCharacters, True)}) - # No need to update lastFourChars here, since the first space will - # have already been appended to lastFourChars and will have broken - # any <!-- or --> sequences - else: - chars = self.stream.charsUntil(("&", "<", "\u0000")) - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": - data + chars}) - return True - - def entityDataState(self): - self.consumeEntity() - self.state = self.dataState - return True - - def rcdataState(self): - data = self.stream.char() - if data == "&": - self.state = self.characterReferenceInRcdata - elif data == "<": - self.state = self.rcdataLessThanSignState - elif data == EOF: - # Tokenization ends. - return False - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.tokenQueue.append({"type": tokenTypes["Characters"], - "data": "\uFFFD"}) - elif data in spaceCharacters: - # Directly after emitting a token you switch back to the "data - # state". At that point spaceCharacters are important so they are - # emitted separately. - self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data": - data + self.stream.charsUntil(spaceCharacters, True)}) - # No need to update lastFourChars here, since the first space will - # have already been appended to lastFourChars and will have broken - # any <!-- or --> sequences - else: - chars = self.stream.charsUntil(("&", "<", "\u0000")) - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": - data + chars}) - return True - - def characterReferenceInRcdata(self): - self.consumeEntity() - self.state = self.rcdataState - return True - - def rawtextState(self): - data = self.stream.char() - if data == "<": - self.state = self.rawtextLessThanSignState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.tokenQueue.append({"type": tokenTypes["Characters"], - "data": "\uFFFD"}) - elif data == EOF: - # Tokenization ends. - return False - else: - chars = self.stream.charsUntil(("<", "\u0000")) - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": - data + chars}) - return True - - def scriptDataState(self): - data = self.stream.char() - if data == "<": - self.state = self.scriptDataLessThanSignState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.tokenQueue.append({"type": tokenTypes["Characters"], - "data": "\uFFFD"}) - elif data == EOF: - # Tokenization ends. - return False - else: - chars = self.stream.charsUntil(("<", "\u0000")) - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": - data + chars}) - return True - - def plaintextState(self): - data = self.stream.char() - if data == EOF: - # Tokenization ends. 
- return False - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.tokenQueue.append({"type": tokenTypes["Characters"], - "data": "\uFFFD"}) - else: - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": - data + self.stream.charsUntil("\u0000")}) - return True - - def tagOpenState(self): - data = self.stream.char() - if data == "!": - self.state = self.markupDeclarationOpenState - elif data == "/": - self.state = self.closeTagOpenState - elif data in asciiLetters: - self.currentToken = {"type": tokenTypes["StartTag"], - "name": data, "data": [], - "selfClosing": False, - "selfClosingAcknowledged": False} - self.state = self.tagNameState - elif data == ">": - # XXX In theory it could be something besides a tag name. But - # do we really care? - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "expected-tag-name-but-got-right-bracket"}) - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<>"}) - self.state = self.dataState - elif data == "?": - # XXX In theory it could be something besides a tag name. But - # do we really care? - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "expected-tag-name-but-got-question-mark"}) - self.stream.unget(data) - self.state = self.bogusCommentState - else: - # XXX - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "expected-tag-name"}) - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) - self.stream.unget(data) - self.state = self.dataState - return True - - def closeTagOpenState(self): - data = self.stream.char() - if data in asciiLetters: - self.currentToken = {"type": tokenTypes["EndTag"], "name": data, - "data": [], "selfClosing": False} - self.state = self.tagNameState - elif data == ">": - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "expected-closing-tag-but-got-right-bracket"}) - self.state = self.dataState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "expected-closing-tag-but-got-eof"}) - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) - self.state = self.dataState - else: - # XXX data can be _'_... 
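tagOpenState and its sibling states all follow the same pattern: read one character, queue zero or more tokens, assign the next handler to self.state, and return True (or False at EOF) to the driving `while self.state():` loop. A toy version of that method-as-state design, with hypothetical names and a deliberately trivial two-state grammar:

    # Toy version of the method-as-state pattern used by HTMLTokenizer:
    # each state is a bound method; returning False ends tokenization.
    from collections import deque

    class TinyTokenizer:
        def __init__(self, text):
            self.chars = iter(text)
            self.queue = deque()
            self.state = self.data_state

        def _char(self):
            return next(self.chars, None)  # None plays the role of EOF

        def data_state(self):
            c = self._char()
            if c is None:
                return False
            if c == "<":
                self.state = self.tag_state
            else:
                self.queue.append(("Characters", c))
            return True

        def tag_state(self):
            # Toy: no EOF/error handling, unlike the real states above.
            name = ""
            c = self._char()
            while c not in (None, ">"):
                name += c
                c = self._char()
            self.queue.append(("StartTag", name))
            self.state = self.data_state
            return True

        def __iter__(self):
            while self.state():
                while self.queue:
                    yield self.queue.popleft()

    print(list(TinyTokenizer("a<b>c")))
    # [('Characters', 'a'), ('StartTag', 'b'), ('Characters', 'c')]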
- self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "expected-closing-tag-but-got-char", - "datavars": {"data": data}}) - self.stream.unget(data) - self.state = self.bogusCommentState - return True - - def tagNameState(self): - data = self.stream.char() - if data in spaceCharacters: - self.state = self.beforeAttributeNameState - elif data == ">": - self.emitCurrentToken() - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-tag-name"}) - self.state = self.dataState - elif data == "/": - self.state = self.selfClosingStartTagState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["name"] += "\uFFFD" - else: - self.currentToken["name"] += data - # (Don't use charsUntil here, because tag names are - # very short and it's faster to not do anything fancy) - return True - - def rcdataLessThanSignState(self): - data = self.stream.char() - if data == "/": - self.temporaryBuffer = "" - self.state = self.rcdataEndTagOpenState - else: - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) - self.stream.unget(data) - self.state = self.rcdataState - return True - - def rcdataEndTagOpenState(self): - data = self.stream.char() - if data in asciiLetters: - self.temporaryBuffer += data - self.state = self.rcdataEndTagNameState - else: - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) - self.stream.unget(data) - self.state = self.rcdataState - return True - - def rcdataEndTagNameState(self): - appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower() - data = self.stream.char() - if data in spaceCharacters and appropriate: - self.currentToken = {"type": tokenTypes["EndTag"], - "name": self.temporaryBuffer, - "data": [], "selfClosing": False} - self.state = self.beforeAttributeNameState - elif data == "/" and appropriate: - self.currentToken = {"type": tokenTypes["EndTag"], - "name": self.temporaryBuffer, - "data": [], "selfClosing": False} - self.state = self.selfClosingStartTagState - elif data == ">" and appropriate: - self.currentToken = {"type": tokenTypes["EndTag"], - "name": self.temporaryBuffer, - "data": [], "selfClosing": False} - self.emitCurrentToken() - self.state = self.dataState - elif data in asciiLetters: - self.temporaryBuffer += data - else: - self.tokenQueue.append({"type": tokenTypes["Characters"], - "data": "</" + self.temporaryBuffer}) - self.stream.unget(data) - self.state = self.rcdataState - return True - - def rawtextLessThanSignState(self): - data = self.stream.char() - if data == "/": - self.temporaryBuffer = "" - self.state = self.rawtextEndTagOpenState - else: - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) - self.stream.unget(data) - self.state = self.rawtextState - return True - - def rawtextEndTagOpenState(self): - data = self.stream.char() - if data in asciiLetters: - self.temporaryBuffer += data - self.state = self.rawtextEndTagNameState - else: - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) - self.stream.unget(data) - self.state = self.rawtextState - return True - - def rawtextEndTagNameState(self): - appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower() - data = self.stream.char() - if data in spaceCharacters and appropriate: - self.currentToken = {"type": tokenTypes["EndTag"], - "name": self.temporaryBuffer, - "data": [], "selfClosing": 
False} - self.state = self.beforeAttributeNameState - elif data == "/" and appropriate: - self.currentToken = {"type": tokenTypes["EndTag"], - "name": self.temporaryBuffer, - "data": [], "selfClosing": False} - self.state = self.selfClosingStartTagState - elif data == ">" and appropriate: - self.currentToken = {"type": tokenTypes["EndTag"], - "name": self.temporaryBuffer, - "data": [], "selfClosing": False} - self.emitCurrentToken() - self.state = self.dataState - elif data in asciiLetters: - self.temporaryBuffer += data - else: - self.tokenQueue.append({"type": tokenTypes["Characters"], - "data": "</" + self.temporaryBuffer}) - self.stream.unget(data) - self.state = self.rawtextState - return True - - def scriptDataLessThanSignState(self): - data = self.stream.char() - if data == "/": - self.temporaryBuffer = "" - self.state = self.scriptDataEndTagOpenState - elif data == "!": - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<!"}) - self.state = self.scriptDataEscapeStartState - else: - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) - self.stream.unget(data) - self.state = self.scriptDataState - return True - - def scriptDataEndTagOpenState(self): - data = self.stream.char() - if data in asciiLetters: - self.temporaryBuffer += data - self.state = self.scriptDataEndTagNameState - else: - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) - self.stream.unget(data) - self.state = self.scriptDataState - return True - - def scriptDataEndTagNameState(self): - appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower() - data = self.stream.char() - if data in spaceCharacters and appropriate: - self.currentToken = {"type": tokenTypes["EndTag"], - "name": self.temporaryBuffer, - "data": [], "selfClosing": False} - self.state = self.beforeAttributeNameState - elif data == "/" and appropriate: - self.currentToken = {"type": tokenTypes["EndTag"], - "name": self.temporaryBuffer, - "data": [], "selfClosing": False} - self.state = self.selfClosingStartTagState - elif data == ">" and appropriate: - self.currentToken = {"type": tokenTypes["EndTag"], - "name": self.temporaryBuffer, - "data": [], "selfClosing": False} - self.emitCurrentToken() - self.state = self.dataState - elif data in asciiLetters: - self.temporaryBuffer += data - else: - self.tokenQueue.append({"type": tokenTypes["Characters"], - "data": "</" + self.temporaryBuffer}) - self.stream.unget(data) - self.state = self.scriptDataState - return True - - def scriptDataEscapeStartState(self): - data = self.stream.char() - if data == "-": - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) - self.state = self.scriptDataEscapeStartDashState - else: - self.stream.unget(data) - self.state = self.scriptDataState - return True - - def scriptDataEscapeStartDashState(self): - data = self.stream.char() - if data == "-": - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) - self.state = self.scriptDataEscapedDashDashState - else: - self.stream.unget(data) - self.state = self.scriptDataState - return True - - def scriptDataEscapedState(self): - data = self.stream.char() - if data == "-": - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) - self.state = self.scriptDataEscapedDashState - elif data == "<": - self.state = self.scriptDataEscapedLessThanSignState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - 
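Each of the rcdata/rawtext/script end-tag-name states opens with the same "appropriate end tag" test: only an end tag whose name matches the currently open element, compared ASCII case-insensitively, may terminate the raw text. A sketch of that predicate (`is_appropriate` is a hypothetical name; the token shape matches the dicts used above):

    # The shared "appropriate end tag" check from the end-tag-name states.
    def is_appropriate(current_token, temporary_buffer):
        return (current_token is not None and
                current_token["name"].lower() == temporary_buffer.lower())

    open_tag = {"type": "StartTag", "name": "script"}
    print(is_appropriate(open_tag, "SCRIPT"))  # True  -> closes the element
    print(is_appropriate(open_tag, "style"))   # False -> stays raw text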
self.tokenQueue.append({"type": tokenTypes["Characters"], - "data": "\uFFFD"}) - elif data == EOF: - self.state = self.dataState - else: - chars = self.stream.charsUntil(("<", "-", "\u0000")) - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": - data + chars}) - return True - - def scriptDataEscapedDashState(self): - data = self.stream.char() - if data == "-": - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) - self.state = self.scriptDataEscapedDashDashState - elif data == "<": - self.state = self.scriptDataEscapedLessThanSignState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.tokenQueue.append({"type": tokenTypes["Characters"], - "data": "\uFFFD"}) - self.state = self.scriptDataEscapedState - elif data == EOF: - self.state = self.dataState - else: - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) - self.state = self.scriptDataEscapedState - return True - - def scriptDataEscapedDashDashState(self): - data = self.stream.char() - if data == "-": - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) - elif data == "<": - self.state = self.scriptDataEscapedLessThanSignState - elif data == ">": - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"}) - self.state = self.scriptDataState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.tokenQueue.append({"type": tokenTypes["Characters"], - "data": "\uFFFD"}) - self.state = self.scriptDataEscapedState - elif data == EOF: - self.state = self.dataState - else: - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) - self.state = self.scriptDataEscapedState - return True - - def scriptDataEscapedLessThanSignState(self): - data = self.stream.char() - if data == "/": - self.temporaryBuffer = "" - self.state = self.scriptDataEscapedEndTagOpenState - elif data in asciiLetters: - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<" + data}) - self.temporaryBuffer = data - self.state = self.scriptDataDoubleEscapeStartState - else: - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) - self.stream.unget(data) - self.state = self.scriptDataEscapedState - return True - - def scriptDataEscapedEndTagOpenState(self): - data = self.stream.char() - if data in asciiLetters: - self.temporaryBuffer = data - self.state = self.scriptDataEscapedEndTagNameState - else: - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) - self.stream.unget(data) - self.state = self.scriptDataEscapedState - return True - - def scriptDataEscapedEndTagNameState(self): - appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower() - data = self.stream.char() - if data in spaceCharacters and appropriate: - self.currentToken = {"type": tokenTypes["EndTag"], - "name": self.temporaryBuffer, - "data": [], "selfClosing": False} - self.state = self.beforeAttributeNameState - elif data == "/" and appropriate: - self.currentToken = {"type": tokenTypes["EndTag"], - "name": self.temporaryBuffer, - "data": [], "selfClosing": False} - self.state = self.selfClosingStartTagState - elif data == ">" and appropriate: - self.currentToken = {"type": tokenTypes["EndTag"], - "name": self.temporaryBuffer, - "data": [], "selfClosing": False} - self.emitCurrentToken() - self.state = self.dataState - elif data in asciiLetters: - 
self.temporaryBuffer += data - else: - self.tokenQueue.append({"type": tokenTypes["Characters"], - "data": "</" + self.temporaryBuffer}) - self.stream.unget(data) - self.state = self.scriptDataEscapedState - return True - - def scriptDataDoubleEscapeStartState(self): - data = self.stream.char() - if data in (spaceCharacters | frozenset(("/", ">"))): - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) - if self.temporaryBuffer.lower() == "script": - self.state = self.scriptDataDoubleEscapedState - else: - self.state = self.scriptDataEscapedState - elif data in asciiLetters: - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) - self.temporaryBuffer += data - else: - self.stream.unget(data) - self.state = self.scriptDataEscapedState - return True - - def scriptDataDoubleEscapedState(self): - data = self.stream.char() - if data == "-": - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) - self.state = self.scriptDataDoubleEscapedDashState - elif data == "<": - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) - self.state = self.scriptDataDoubleEscapedLessThanSignState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.tokenQueue.append({"type": tokenTypes["Characters"], - "data": "\uFFFD"}) - elif data == EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-script-in-script"}) - self.state = self.dataState - else: - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) - return True - - def scriptDataDoubleEscapedDashState(self): - data = self.stream.char() - if data == "-": - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) - self.state = self.scriptDataDoubleEscapedDashDashState - elif data == "<": - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) - self.state = self.scriptDataDoubleEscapedLessThanSignState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.tokenQueue.append({"type": tokenTypes["Characters"], - "data": "\uFFFD"}) - self.state = self.scriptDataDoubleEscapedState - elif data == EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-script-in-script"}) - self.state = self.dataState - else: - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) - self.state = self.scriptDataDoubleEscapedState - return True - - def scriptDataDoubleEscapedDashDashState(self): - data = self.stream.char() - if data == "-": - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) - elif data == "<": - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) - self.state = self.scriptDataDoubleEscapedLessThanSignState - elif data == ">": - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"}) - self.state = self.scriptDataState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.tokenQueue.append({"type": tokenTypes["Characters"], - "data": "\uFFFD"}) - self.state = self.scriptDataDoubleEscapedState - elif data == EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-script-in-script"}) - self.state = self.dataState - else: - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) - self.state = self.scriptDataDoubleEscapedState - return True - - def 
scriptDataDoubleEscapedLessThanSignState(self): - data = self.stream.char() - if data == "/": - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "/"}) - self.temporaryBuffer = "" - self.state = self.scriptDataDoubleEscapeEndState - else: - self.stream.unget(data) - self.state = self.scriptDataDoubleEscapedState - return True - - def scriptDataDoubleEscapeEndState(self): - data = self.stream.char() - if data in (spaceCharacters | frozenset(("/", ">"))): - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) - if self.temporaryBuffer.lower() == "script": - self.state = self.scriptDataEscapedState - else: - self.state = self.scriptDataDoubleEscapedState - elif data in asciiLetters: - self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) - self.temporaryBuffer += data - else: - self.stream.unget(data) - self.state = self.scriptDataDoubleEscapedState - return True - - def beforeAttributeNameState(self): - data = self.stream.char() - if data in spaceCharacters: - self.stream.charsUntil(spaceCharacters, True) - elif data in asciiLetters: - self.currentToken["data"].append([data, ""]) - self.state = self.attributeNameState - elif data == ">": - self.emitCurrentToken() - elif data == "/": - self.state = self.selfClosingStartTagState - elif data in ("'", '"', "=", "<"): - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "invalid-character-in-attribute-name"}) - self.currentToken["data"].append([data, ""]) - self.state = self.attributeNameState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["data"].append(["\uFFFD", ""]) - self.state = self.attributeNameState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "expected-attribute-name-but-got-eof"}) - self.state = self.dataState - else: - self.currentToken["data"].append([data, ""]) - self.state = self.attributeNameState - return True - - def attributeNameState(self): - data = self.stream.char() - leavingThisState = True - emitToken = False - if data == "=": - self.state = self.beforeAttributeValueState - elif data in asciiLetters: - self.currentToken["data"][-1][0] += data +\ - self.stream.charsUntil(asciiLetters, True) - leavingThisState = False - elif data == ">": - # XXX If we emit here the attributes are converted to a dict - # without being checked and when the code below runs we error - # because data is a dict not a list - emitToken = True - elif data in spaceCharacters: - self.state = self.afterAttributeNameState - elif data == "/": - self.state = self.selfClosingStartTagState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["data"][-1][0] += "\uFFFD" - leavingThisState = False - elif data in ("'", '"', "<"): - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": - "invalid-character-in-attribute-name"}) - self.currentToken["data"][-1][0] += data - leavingThisState = False - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "eof-in-attribute-name"}) - self.state = self.dataState - else: - self.currentToken["data"][-1][0] += data - leavingThisState = False - - if leavingThisState: - # Attributes are not dropped at this stage. That happens when the - # start tag token is emitted so values can still be safely appended - # to attributes, but we do want to report the parse error in time. 
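            # Worked example (added; not part of the upstream source): for
            # '<p ID="a" id="b">' the name "ID" is ASCII-lowercased below, the
            # scan over the earlier attributes then reports duplicate-attribute,
            # and when the parser later collapses the attribute list into a
            # mapping only the first value ("a") survives, as the HTML spec
            # requires for duplicate attributes.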
- self.currentToken["data"][-1][0] = ( - self.currentToken["data"][-1][0].translate(asciiUpper2Lower)) - for name, _ in self.currentToken["data"][:-1]: - if self.currentToken["data"][-1][0] == name: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "duplicate-attribute"}) - break - # XXX Fix for above XXX - if emitToken: - self.emitCurrentToken() - return True - - def afterAttributeNameState(self): - data = self.stream.char() - if data in spaceCharacters: - self.stream.charsUntil(spaceCharacters, True) - elif data == "=": - self.state = self.beforeAttributeValueState - elif data == ">": - self.emitCurrentToken() - elif data in asciiLetters: - self.currentToken["data"].append([data, ""]) - self.state = self.attributeNameState - elif data == "/": - self.state = self.selfClosingStartTagState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["data"].append(["\uFFFD", ""]) - self.state = self.attributeNameState - elif data in ("'", '"', "<"): - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "invalid-character-after-attribute-name"}) - self.currentToken["data"].append([data, ""]) - self.state = self.attributeNameState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "expected-end-of-tag-but-got-eof"}) - self.state = self.dataState - else: - self.currentToken["data"].append([data, ""]) - self.state = self.attributeNameState - return True - - def beforeAttributeValueState(self): - data = self.stream.char() - if data in spaceCharacters: - self.stream.charsUntil(spaceCharacters, True) - elif data == "\"": - self.state = self.attributeValueDoubleQuotedState - elif data == "&": - self.state = self.attributeValueUnQuotedState - self.stream.unget(data) - elif data == "'": - self.state = self.attributeValueSingleQuotedState - elif data == ">": - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "expected-attribute-value-but-got-right-bracket"}) - self.emitCurrentToken() - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["data"][-1][1] += "\uFFFD" - self.state = self.attributeValueUnQuotedState - elif data in ("=", "<", "`"): - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "equals-in-unquoted-attribute-value"}) - self.currentToken["data"][-1][1] += data - self.state = self.attributeValueUnQuotedState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "expected-attribute-value-but-got-eof"}) - self.state = self.dataState - else: - self.currentToken["data"][-1][1] += data - self.state = self.attributeValueUnQuotedState - return True - - def attributeValueDoubleQuotedState(self): - data = self.stream.char() - if data == "\"": - self.state = self.afterAttributeValueState - elif data == "&": - self.processEntityInAttribute('"') - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["data"][-1][1] += "\uFFFD" - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-attribute-value-double-quote"}) - self.state = self.dataState - else: - self.currentToken["data"][-1][1] += data +\ - self.stream.charsUntil(("\"", "&", "\u0000")) - return True - - def attributeValueSingleQuotedState(self): - data = self.stream.char() - if data == "'": - self.state = 
self.afterAttributeValueState - elif data == "&": - self.processEntityInAttribute("'") - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["data"][-1][1] += "\uFFFD" - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-attribute-value-single-quote"}) - self.state = self.dataState - else: - self.currentToken["data"][-1][1] += data +\ - self.stream.charsUntil(("'", "&", "\u0000")) - return True - - def attributeValueUnQuotedState(self): - data = self.stream.char() - if data in spaceCharacters: - self.state = self.beforeAttributeNameState - elif data == "&": - self.processEntityInAttribute(">") - elif data == ">": - self.emitCurrentToken() - elif data in ('"', "'", "=", "<", "`"): - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-character-in-unquoted-attribute-value"}) - self.currentToken["data"][-1][1] += data - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["data"][-1][1] += "\uFFFD" - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-attribute-value-no-quotes"}) - self.state = self.dataState - else: - self.currentToken["data"][-1][1] += data + self.stream.charsUntil( - frozenset(("&", ">", '"', "'", "=", "<", "`", "\u0000")) | spaceCharacters) - return True - - def afterAttributeValueState(self): - data = self.stream.char() - if data in spaceCharacters: - self.state = self.beforeAttributeNameState - elif data == ">": - self.emitCurrentToken() - elif data == "/": - self.state = self.selfClosingStartTagState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-EOF-after-attribute-value"}) - self.stream.unget(data) - self.state = self.dataState - else: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-character-after-attribute-value"}) - self.stream.unget(data) - self.state = self.beforeAttributeNameState - return True - - def selfClosingStartTagState(self): - data = self.stream.char() - if data == ">": - self.currentToken["selfClosing"] = True - self.emitCurrentToken() - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": - "unexpected-EOF-after-solidus-in-tag"}) - self.stream.unget(data) - self.state = self.dataState - else: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-character-after-solidus-in-tag"}) - self.stream.unget(data) - self.state = self.beforeAttributeNameState - return True - - def bogusCommentState(self): - # Make a new comment token and give it as value all the characters - # until the first > or EOF (charsUntil checks for EOF automatically) - # and emit it. - data = self.stream.charsUntil(">") - data = data.replace("\u0000", "\uFFFD") - self.tokenQueue.append( - {"type": tokenTypes["Comment"], "data": data}) - - # Eat the character directly after the bogus comment which is either a - # ">" or an EOF. 
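        # Example (added; not part of the upstream source): "<?php echo 1 ?>"
        # has no tag name, so the tokenizer arrives here after ungetting "?"
        # and emits a Comment token whose data is "?php echo 1 ?"; the leading
        # "<" and the terminating ">" are consumed rather than preserved.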
- self.stream.char() - self.state = self.dataState - return True - - def markupDeclarationOpenState(self): - charStack = [self.stream.char()] - if charStack[-1] == "-": - charStack.append(self.stream.char()) - if charStack[-1] == "-": - self.currentToken = {"type": tokenTypes["Comment"], "data": ""} - self.state = self.commentStartState - return True - elif charStack[-1] in ('d', 'D'): - matched = True - for expected in (('o', 'O'), ('c', 'C'), ('t', 'T'), - ('y', 'Y'), ('p', 'P'), ('e', 'E')): - charStack.append(self.stream.char()) - if charStack[-1] not in expected: - matched = False - break - if matched: - self.currentToken = {"type": tokenTypes["Doctype"], - "name": "", - "publicId": None, "systemId": None, - "correct": True} - self.state = self.doctypeState - return True - elif (charStack[-1] == "[" and - self.parser is not None and - self.parser.tree.openElements and - self.parser.tree.openElements[-1].namespace != self.parser.tree.defaultNamespace): - matched = True - for expected in ["C", "D", "A", "T", "A", "["]: - charStack.append(self.stream.char()) - if charStack[-1] != expected: - matched = False - break - if matched: - self.state = self.cdataSectionState - return True - - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "expected-dashes-or-doctype"}) - - while charStack: - self.stream.unget(charStack.pop()) - self.state = self.bogusCommentState - return True - - def commentStartState(self): - data = self.stream.char() - if data == "-": - self.state = self.commentStartDashState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["data"] += "\uFFFD" - elif data == ">": - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "incorrect-comment"}) - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-comment"}) - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.currentToken["data"] += data - self.state = self.commentState - return True - - def commentStartDashState(self): - data = self.stream.char() - if data == "-": - self.state = self.commentEndState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["data"] += "-\uFFFD" - elif data == ">": - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "incorrect-comment"}) - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-comment"}) - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.currentToken["data"] += "-" + data - self.state = self.commentState - return True - - def commentState(self): - data = self.stream.char() - if data == "-": - self.state = self.commentEndDashState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["data"] += "\uFFFD" - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "eof-in-comment"}) - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.currentToken["data"] += data + \ - self.stream.charsUntil(("-", "\u0000")) - return True - - def commentEndDashState(self): - data = self.stream.char() - if data == "-": - self.state = 
self.commentEndState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["data"] += "-\uFFFD" - self.state = self.commentState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-comment-end-dash"}) - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.currentToken["data"] += "-" + data - self.state = self.commentState - return True - - def commentEndState(self): - data = self.stream.char() - if data == ">": - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["data"] += "--\uFFFD" - self.state = self.commentState - elif data == "!": - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-bang-after-double-dash-in-comment"}) - self.state = self.commentEndBangState - elif data == "-": - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-dash-after-double-dash-in-comment"}) - self.currentToken["data"] += data - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-comment-double-dash"}) - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - # XXX - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-char-in-comment"}) - self.currentToken["data"] += "--" + data - self.state = self.commentState - return True - - def commentEndBangState(self): - data = self.stream.char() - if data == ">": - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - elif data == "-": - self.currentToken["data"] += "--!" - self.state = self.commentEndDashState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["data"] += "--!\uFFFD" - self.state = self.commentState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-comment-end-bang-state"}) - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.currentToken["data"] += "--!" 
+ data - self.state = self.commentState - return True - - def doctypeState(self): - data = self.stream.char() - if data in spaceCharacters: - self.state = self.beforeDoctypeNameState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "expected-doctype-name-but-got-eof"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "need-space-after-doctype"}) - self.stream.unget(data) - self.state = self.beforeDoctypeNameState - return True - - def beforeDoctypeNameState(self): - data = self.stream.char() - if data in spaceCharacters: - pass - elif data == ">": - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "expected-doctype-name-but-got-right-bracket"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["name"] = "\uFFFD" - self.state = self.doctypeNameState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "expected-doctype-name-but-got-eof"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.currentToken["name"] = data - self.state = self.doctypeNameState - return True - - def doctypeNameState(self): - data = self.stream.char() - if data in spaceCharacters: - self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower) - self.state = self.afterDoctypeNameState - elif data == ">": - self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower) - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["name"] += "\uFFFD" - self.state = self.doctypeNameState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-doctype-name"}) - self.currentToken["correct"] = False - self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower) - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.currentToken["name"] += data - return True - - def afterDoctypeNameState(self): - data = self.stream.char() - if data in spaceCharacters: - pass - elif data == ">": - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - elif data is EOF: - self.currentToken["correct"] = False - self.stream.unget(data) - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-doctype"}) - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - if data in ("p", "P"): - matched = True - for expected in (("u", "U"), ("b", "B"), ("l", "L"), - ("i", "I"), ("c", "C")): - data = self.stream.char() - if data not in expected: - matched = False - break - if matched: - self.state = self.afterDoctypePublicKeywordState - return True - elif data in ("s", "S"): - matched = True - for expected in (("y", "Y"), ("s", "S"), ("t", "T"), - ("e", "E"), ("m", "M")): - data = self.stream.char() - if data not in expected: - matched = False - break - if matched: - self.state = self.afterDoctypeSystemKeywordState - return True - - # All the characters read before the current 'data' will be - # [a-zA-Z], 
so they're garbage in the bogus doctype and can be - # discarded; only the latest character might be '>' or EOF - # and needs to be ungetted - self.stream.unget(data) - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "expected-space-or-right-bracket-in-doctype", "datavars": - {"data": data}}) - self.currentToken["correct"] = False - self.state = self.bogusDoctypeState - - return True - - def afterDoctypePublicKeywordState(self): - data = self.stream.char() - if data in spaceCharacters: - self.state = self.beforeDoctypePublicIdentifierState - elif data in ("'", '"'): - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-char-in-doctype"}) - self.stream.unget(data) - self.state = self.beforeDoctypePublicIdentifierState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-doctype"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.stream.unget(data) - self.state = self.beforeDoctypePublicIdentifierState - return True - - def beforeDoctypePublicIdentifierState(self): - data = self.stream.char() - if data in spaceCharacters: - pass - elif data == "\"": - self.currentToken["publicId"] = "" - self.state = self.doctypePublicIdentifierDoubleQuotedState - elif data == "'": - self.currentToken["publicId"] = "" - self.state = self.doctypePublicIdentifierSingleQuotedState - elif data == ">": - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-end-of-doctype"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-doctype"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-char-in-doctype"}) - self.currentToken["correct"] = False - self.state = self.bogusDoctypeState - return True - - def doctypePublicIdentifierDoubleQuotedState(self): - data = self.stream.char() - if data == "\"": - self.state = self.afterDoctypePublicIdentifierState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["publicId"] += "\uFFFD" - elif data == ">": - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-end-of-doctype"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-doctype"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.currentToken["publicId"] += data - return True - - def doctypePublicIdentifierSingleQuotedState(self): - data = self.stream.char() - if data == "'": - self.state = self.afterDoctypePublicIdentifierState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["publicId"] += "\uFFFD" - elif data == ">": - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-end-of-doctype"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - elif data is EOF: - self.tokenQueue.append({"type": 
tokenTypes["ParseError"], "data": - "eof-in-doctype"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.currentToken["publicId"] += data - return True - - def afterDoctypePublicIdentifierState(self): - data = self.stream.char() - if data in spaceCharacters: - self.state = self.betweenDoctypePublicAndSystemIdentifiersState - elif data == ">": - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - elif data == '"': - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-char-in-doctype"}) - self.currentToken["systemId"] = "" - self.state = self.doctypeSystemIdentifierDoubleQuotedState - elif data == "'": - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-char-in-doctype"}) - self.currentToken["systemId"] = "" - self.state = self.doctypeSystemIdentifierSingleQuotedState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-doctype"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-char-in-doctype"}) - self.currentToken["correct"] = False - self.state = self.bogusDoctypeState - return True - - def betweenDoctypePublicAndSystemIdentifiersState(self): - data = self.stream.char() - if data in spaceCharacters: - pass - elif data == ">": - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - elif data == '"': - self.currentToken["systemId"] = "" - self.state = self.doctypeSystemIdentifierDoubleQuotedState - elif data == "'": - self.currentToken["systemId"] = "" - self.state = self.doctypeSystemIdentifierSingleQuotedState - elif data == EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-doctype"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-char-in-doctype"}) - self.currentToken["correct"] = False - self.state = self.bogusDoctypeState - return True - - def afterDoctypeSystemKeywordState(self): - data = self.stream.char() - if data in spaceCharacters: - self.state = self.beforeDoctypeSystemIdentifierState - elif data in ("'", '"'): - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-char-in-doctype"}) - self.stream.unget(data) - self.state = self.beforeDoctypeSystemIdentifierState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-doctype"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.stream.unget(data) - self.state = self.beforeDoctypeSystemIdentifierState - return True - - def beforeDoctypeSystemIdentifierState(self): - data = self.stream.char() - if data in spaceCharacters: - pass - elif data == "\"": - self.currentToken["systemId"] = "" - self.state = self.doctypeSystemIdentifierDoubleQuotedState - elif data == "'": - self.currentToken["systemId"] = "" - self.state = self.doctypeSystemIdentifierSingleQuotedState - elif data == ">": - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-char-in-doctype"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - elif data is EOF: - 
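            # Added note (not part of the upstream source): taken together,
            # the doctype identifier states parse declarations such as
            #   <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
            #            "http://www.w3.org/TR/html4/strict.dtd">
            # binding the quoted strings to publicId and systemId on the
            # Doctype token; a bare <!DOCTYPE html> leaves both as None.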
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-doctype"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-char-in-doctype"}) - self.currentToken["correct"] = False - self.state = self.bogusDoctypeState - return True - - def doctypeSystemIdentifierDoubleQuotedState(self): - data = self.stream.char() - if data == "\"": - self.state = self.afterDoctypeSystemIdentifierState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["systemId"] += "\uFFFD" - elif data == ">": - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-end-of-doctype"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-doctype"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.currentToken["systemId"] += data - return True - - def doctypeSystemIdentifierSingleQuotedState(self): - data = self.stream.char() - if data == "'": - self.state = self.afterDoctypeSystemIdentifierState - elif data == "\u0000": - self.tokenQueue.append({"type": tokenTypes["ParseError"], - "data": "invalid-codepoint"}) - self.currentToken["systemId"] += "\uFFFD" - elif data == ">": - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-end-of-doctype"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-doctype"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.currentToken["systemId"] += data - return True - - def afterDoctypeSystemIdentifierState(self): - data = self.stream.char() - if data in spaceCharacters: - pass - elif data == ">": - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - elif data is EOF: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "eof-in-doctype"}) - self.currentToken["correct"] = False - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": - "unexpected-char-in-doctype"}) - self.state = self.bogusDoctypeState - return True - - def bogusDoctypeState(self): - data = self.stream.char() - if data == ">": - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - elif data is EOF: - # XXX EMIT - self.stream.unget(data) - self.tokenQueue.append(self.currentToken) - self.state = self.dataState - else: - pass - return True - - def cdataSectionState(self): - data = [] - while True: - data.append(self.stream.charsUntil("]")) - data.append(self.stream.charsUntil(">")) - char = self.stream.char() - if char == EOF: - break - else: - assert char == ">" - if data[-1][-2:] == "]]": - data[-1] = data[-1][:-2] - break - else: - data.append(char) - - data = "".join(data) # pylint:disable=redefined-variable-type - # Deal with null here rather than in the parser - nullCount = data.count("\u0000") - if nullCount > 0: - for _ in range(nullCount): - self.tokenQueue.append({"type": tokenTypes["ParseError"], 
- "data": "invalid-codepoint"}) - data = data.replace("\u0000", "\uFFFD") - if data: - self.tokenQueue.append({"type": tokenTypes["Characters"], - "data": data}) - self.state = self.dataState - return True diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/_trie/__init__.py b/env/Lib/site-packages/pip/_vendor/html5lib/_trie/__init__.py deleted file mode 100644 index 07bad5d31c1ec7aa7ac081dac5586db91f3c5a99..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/html5lib/_trie/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -from .py import Trie - -__all__ = ["Trie"] diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/_trie/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/html5lib/_trie/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 948138bf21c0d7677f4e18a9dcc56d220438985a..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/html5lib/_trie/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/_trie/__pycache__/_base.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/html5lib/_trie/__pycache__/_base.cpython-39.pyc deleted file mode 100644 index 2693bbb73e2f1897267f6d0e71288337437604d2..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/html5lib/_trie/__pycache__/_base.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/_trie/__pycache__/py.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/html5lib/_trie/__pycache__/py.cpython-39.pyc deleted file mode 100644 index ff2ef154a2f14c3b384b2ffb17b82206e1625b9e..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/html5lib/_trie/__pycache__/py.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/_trie/_base.py b/env/Lib/site-packages/pip/_vendor/html5lib/_trie/_base.py deleted file mode 100644 index 6b71975f08587ca8861acc2380f17701b7c7c656..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/html5lib/_trie/_base.py +++ /dev/null @@ -1,40 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -try: - from collections.abc import Mapping -except ImportError: # Python 2.7 - from collections import Mapping - - -class Trie(Mapping): - """Abstract base class for tries""" - - def keys(self, prefix=None): - # pylint:disable=arguments-differ - keys = super(Trie, self).keys() - - if prefix is None: - return set(keys) - - return {x for x in keys if x.startswith(prefix)} - - def has_keys_with_prefix(self, prefix): - for key in self.keys(): - if key.startswith(prefix): - return True - - return False - - def longest_prefix(self, prefix): - if prefix in self: - return prefix - - for i in range(1, len(prefix) + 1): - if prefix[:-i] in self: - return prefix[:-i] - - raise KeyError(prefix) - - def longest_prefix_item(self, prefix): - lprefix = self.longest_prefix(prefix) - return (lprefix, self[lprefix]) diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/_trie/py.py b/env/Lib/site-packages/pip/_vendor/html5lib/_trie/py.py deleted file mode 100644 index c178b219def8f7fbfe6d00f7822a8e61293d003a..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/html5lib/_trie/py.py +++ /dev/null @@ -1,67 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals -from pip._vendor.six import 
text_type - -from bisect import bisect_left - -from ._base import Trie as ABCTrie - - -class Trie(ABCTrie): - def __init__(self, data): - if not all(isinstance(x, text_type) for x in data.keys()): - raise TypeError("All keys must be strings") - - self._data = data - self._keys = sorted(data.keys()) - self._cachestr = "" - self._cachepoints = (0, len(data)) - - def __contains__(self, key): - return key in self._data - - def __len__(self): - return len(self._data) - - def __iter__(self): - return iter(self._data) - - def __getitem__(self, key): - return self._data[key] - - def keys(self, prefix=None): - if prefix is None or prefix == "" or not self._keys: - return set(self._keys) - - if prefix.startswith(self._cachestr): - lo, hi = self._cachepoints - start = i = bisect_left(self._keys, prefix, lo, hi) - else: - start = i = bisect_left(self._keys, prefix) - - keys = set() - if start == len(self._keys): - return keys - - while self._keys[i].startswith(prefix): - keys.add(self._keys[i]) - i += 1 - - self._cachestr = prefix - self._cachepoints = (start, i) - - return keys - - def has_keys_with_prefix(self, prefix): - if prefix in self._data: - return True - - if prefix.startswith(self._cachestr): - lo, hi = self._cachepoints - i = bisect_left(self._keys, prefix, lo, hi) - else: - i = bisect_left(self._keys, prefix) - - if i == len(self._keys): - return False - - return self._keys[i].startswith(prefix) diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/_utils.py b/env/Lib/site-packages/pip/_vendor/html5lib/_utils.py deleted file mode 100644 index d7c4926afce32cad6a36379f6e159e7fe6215c75..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/html5lib/_utils.py +++ /dev/null @@ -1,159 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -from types import ModuleType - -try: - from collections.abc import Mapping -except ImportError: - from collections import Mapping - -from pip._vendor.six import text_type, PY3 - -if PY3: - import xml.etree.ElementTree as default_etree -else: - try: - import xml.etree.cElementTree as default_etree - except ImportError: - import xml.etree.ElementTree as default_etree - - -__all__ = ["default_etree", "MethodDispatcher", "isSurrogatePair", - "surrogatePairToCodepoint", "moduleFactoryFactory", - "supports_lone_surrogates"] - - -# Platforms not supporting lone surrogates (\uD800-\uDFFF) should be -# caught by the below test. In general this would be any platform -# using UTF-16 as its encoding of unicode strings, such as -# Jython. This is because UTF-16 itself is based on the use of such -# surrogates, and there is no mechanism to further escape such -# escapes. -try: - _x = eval('"\\uD800"') # pylint:disable=eval-used - if not isinstance(_x, text_type): - # We need this with u"" because of http://bugs.jython.org/issue2039 - _x = eval('u"\\uD800"') # pylint:disable=eval-used - assert isinstance(_x, text_type) -except Exception: - supports_lone_surrogates = False -else: - supports_lone_surrogates = True - - -class MethodDispatcher(dict): - """Dict with 2 special properties: - - On initiation, keys that are lists, sets or tuples are converted to - multiple keys so accessing any one of the items in the original - list-like object returns the matching value - - md = MethodDispatcher({("foo", "bar"):"baz"}) - md["foo"] == "baz" - - A default value which can be set through the default attribute. 
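    Note (added clarification, not in the upstream docstring): items is
    consumed as an iterable of (key, value) pairs, so passing a bare dict
    as above iterates only its keys; the unambiguous spelling is the pair
    form:

        md = MethodDispatcher([(("foo", "bar"), "baz")])
        assert md["foo"] == md["bar"] == "baz"

    Lookups of unregistered keys return the default attribute, which is
    None until assigned.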
- """ - - def __init__(self, items=()): - _dictEntries = [] - for name, value in items: - if isinstance(name, (list, tuple, frozenset, set)): - for item in name: - _dictEntries.append((item, value)) - else: - _dictEntries.append((name, value)) - dict.__init__(self, _dictEntries) - assert len(self) == len(_dictEntries) - self.default = None - - def __getitem__(self, key): - return dict.get(self, key, self.default) - - def __get__(self, instance, owner=None): - return BoundMethodDispatcher(instance, self) - - -class BoundMethodDispatcher(Mapping): - """Wraps a MethodDispatcher, binding its return values to `instance`""" - def __init__(self, instance, dispatcher): - self.instance = instance - self.dispatcher = dispatcher - - def __getitem__(self, key): - # see https://docs.python.org/3/reference/datamodel.html#object.__get__ - # on a function, __get__ is used to bind a function to an instance as a bound method - return self.dispatcher[key].__get__(self.instance) - - def get(self, key, default): - if key in self.dispatcher: - return self[key] - else: - return default - - def __iter__(self): - return iter(self.dispatcher) - - def __len__(self): - return len(self.dispatcher) - - def __contains__(self, key): - return key in self.dispatcher - - -# Some utility functions to deal with weirdness around UCS2 vs UCS4 -# python builds - -def isSurrogatePair(data): - return (len(data) == 2 and - ord(data[0]) >= 0xD800 and ord(data[0]) <= 0xDBFF and - ord(data[1]) >= 0xDC00 and ord(data[1]) <= 0xDFFF) - - -def surrogatePairToCodepoint(data): - char_val = (0x10000 + (ord(data[0]) - 0xD800) * 0x400 + - (ord(data[1]) - 0xDC00)) - return char_val - -# Module Factory Factory (no, this isn't Java, I know) -# Here to stop this being duplicated all over the place. - - -def moduleFactoryFactory(factory): - moduleCache = {} - - def moduleFactory(baseModule, *args, **kwargs): - if isinstance(ModuleType.__name__, type("")): - name = "_%s_factory" % baseModule.__name__ - else: - name = b"_%s_factory" % baseModule.__name__ - - kwargs_tuple = tuple(kwargs.items()) - - try: - return moduleCache[name][args][kwargs_tuple] - except KeyError: - mod = ModuleType(name) - objs = factory(baseModule, *args, **kwargs) - mod.__dict__.update(objs) - if "name" not in moduleCache: - moduleCache[name] = {} - if "args" not in moduleCache[name]: - moduleCache[name][args] = {} - if "kwargs" not in moduleCache[name][args]: - moduleCache[name][args][kwargs_tuple] = {} - moduleCache[name][args][kwargs_tuple] = mod - return mod - - return moduleFactory - - -def memoize(func): - cache = {} - - def wrapped(*args, **kwargs): - key = (tuple(args), tuple(kwargs.items())) - if key not in cache: - cache[key] = func(*args, **kwargs) - return cache[key] - - return wrapped diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/constants.py b/env/Lib/site-packages/pip/_vendor/html5lib/constants.py deleted file mode 100644 index fe3e237cd8a118da1c707412fe8251d2e19477c5..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/html5lib/constants.py +++ /dev/null @@ -1,2946 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -import string - -EOF = None - -E = { - "null-character": - "Null character in input stream, replaced with U+FFFD.", - "invalid-codepoint": - "Invalid codepoint in stream.", - "incorrectly-placed-solidus": - "Solidus (/) incorrectly placed in tag.", - "incorrect-cr-newline-entity": - "Incorrect CR newline entity, replaced with LF.", - "illegal-windows-1252-entity": - "Entity used with 
illegal number (windows-1252 reference).", - "cant-convert-numeric-entity": - "Numeric entity couldn't be converted to character " - "(codepoint U+%(charAsInt)08x).", - "illegal-codepoint-for-numeric-entity": - "Numeric entity represents an illegal codepoint: " - "U+%(charAsInt)08x.", - "numeric-entity-without-semicolon": - "Numeric entity didn't end with ';'.", - "expected-numeric-entity-but-got-eof": - "Numeric entity expected. Got end of file instead.", - "expected-numeric-entity": - "Numeric entity expected but none found.", - "named-entity-without-semicolon": - "Named entity didn't end with ';'.", - "expected-named-entity": - "Named entity expected. Got none.", - "attributes-in-end-tag": - "End tag contains unexpected attributes.", - 'self-closing-flag-on-end-tag': - "End tag contains unexpected self-closing flag.", - "expected-tag-name-but-got-right-bracket": - "Expected tag name. Got '>' instead.", - "expected-tag-name-but-got-question-mark": - "Expected tag name. Got '?' instead. (HTML doesn't " - "support processing instructions.)", - "expected-tag-name": - "Expected tag name. Got something else instead", - "expected-closing-tag-but-got-right-bracket": - "Expected closing tag. Got '>' instead. Ignoring '</>'.", - "expected-closing-tag-but-got-eof": - "Expected closing tag. Unexpected end of file.", - "expected-closing-tag-but-got-char": - "Expected closing tag. Unexpected character '%(data)s' found.", - "eof-in-tag-name": - "Unexpected end of file in the tag name.", - "expected-attribute-name-but-got-eof": - "Unexpected end of file. Expected attribute name instead.", - "eof-in-attribute-name": - "Unexpected end of file in attribute name.", - "invalid-character-in-attribute-name": - "Invalid character in attribute name", - "duplicate-attribute": - "Dropped duplicate attribute on tag.", - "expected-end-of-tag-name-but-got-eof": - "Unexpected end of file. Expected = or end of tag.", - "expected-attribute-value-but-got-eof": - "Unexpected end of file. Expected attribute value.", - "expected-attribute-value-but-got-right-bracket": - "Expected attribute value. Got '>' instead.", - 'equals-in-unquoted-attribute-value': - "Unexpected = in unquoted attribute", - 'unexpected-character-in-unquoted-attribute-value': - "Unexpected character in unquoted attribute", - "invalid-character-after-attribute-name": - "Unexpected character after attribute name.", - "unexpected-character-after-attribute-value": - "Unexpected character after attribute value.", - "eof-in-attribute-value-double-quote": - "Unexpected end of file in attribute value (\").", - "eof-in-attribute-value-single-quote": - "Unexpected end of file in attribute value (').", - "eof-in-attribute-value-no-quotes": - "Unexpected end of file in attribute value.", - "unexpected-EOF-after-solidus-in-tag": - "Unexpected end of file in tag. Expected >", - "unexpected-character-after-solidus-in-tag": - "Unexpected character after / in tag. Expected >", - "expected-dashes-or-doctype": - "Expected '--' or 'DOCTYPE'. Not found.", - "unexpected-bang-after-double-dash-in-comment": - "Unexpected ! 
after -- in comment", - "unexpected-space-after-double-dash-in-comment": - "Unexpected space after -- in comment", - "incorrect-comment": - "Incorrect comment.", - "eof-in-comment": - "Unexpected end of file in comment.", - "eof-in-comment-end-dash": - "Unexpected end of file in comment (-)", - "unexpected-dash-after-double-dash-in-comment": - "Unexpected '-' after '--' found in comment.", - "eof-in-comment-double-dash": - "Unexpected end of file in comment (--).", - "eof-in-comment-end-space-state": - "Unexpected end of file in comment.", - "eof-in-comment-end-bang-state": - "Unexpected end of file in comment.", - "unexpected-char-in-comment": - "Unexpected character in comment found.", - "need-space-after-doctype": - "No space after literal string 'DOCTYPE'.", - "expected-doctype-name-but-got-right-bracket": - "Unexpected > character. Expected DOCTYPE name.", - "expected-doctype-name-but-got-eof": - "Unexpected end of file. Expected DOCTYPE name.", - "eof-in-doctype-name": - "Unexpected end of file in DOCTYPE name.", - "eof-in-doctype": - "Unexpected end of file in DOCTYPE.", - "expected-space-or-right-bracket-in-doctype": - "Expected space or '>'. Got '%(data)s'", - "unexpected-end-of-doctype": - "Unexpected end of DOCTYPE.", - "unexpected-char-in-doctype": - "Unexpected character in DOCTYPE.", - "eof-in-innerhtml": - "XXX innerHTML EOF", - "unexpected-doctype": - "Unexpected DOCTYPE. Ignored.", - "non-html-root": - "html needs to be the first start tag.", - "expected-doctype-but-got-eof": - "Unexpected End of file. Expected DOCTYPE.", - "unknown-doctype": - "Erroneous DOCTYPE.", - "expected-doctype-but-got-chars": - "Unexpected non-space characters. Expected DOCTYPE.", - "expected-doctype-but-got-start-tag": - "Unexpected start tag (%(name)s). Expected DOCTYPE.", - "expected-doctype-but-got-end-tag": - "Unexpected end tag (%(name)s). Expected DOCTYPE.", - "end-tag-after-implied-root": - "Unexpected end tag (%(name)s) after the (implied) root element.", - "expected-named-closing-tag-but-got-eof": - "Unexpected end of file. Expected end tag (%(name)s).", - "two-heads-are-not-better-than-one": - "Unexpected start tag head in existing head. Ignored.", - "unexpected-end-tag": - "Unexpected end tag (%(name)s). Ignored.", - "unexpected-start-tag-out-of-my-head": - "Unexpected start tag (%(name)s) that can be in head. Moved.", - "unexpected-start-tag": - "Unexpected start tag (%(name)s).", - "missing-end-tag": - "Missing end tag (%(name)s).", - "missing-end-tags": - "Missing end tags (%(name)s).", - "unexpected-start-tag-implies-end-tag": - "Unexpected start tag (%(startName)s) " - "implies end tag (%(endName)s).", - "unexpected-start-tag-treated-as": - "Unexpected start tag (%(originalName)s). Treated as %(newName)s.", - "deprecated-tag": - "Unexpected start tag %(name)s. Don't use it!", - "unexpected-start-tag-ignored": - "Unexpected start tag %(name)s. Ignored.", - "expected-one-end-tag-but-got-another": - "Unexpected end tag (%(gotName)s). " - "Missing end tag (%(expectedName)s).", - "end-tag-too-early": - "End tag (%(name)s) seen too early. Expected other end tag.", - "end-tag-too-early-named": - "Unexpected end tag (%(gotName)s). Expected end tag (%(expectedName)s).", - "end-tag-too-early-ignored": - "End tag (%(name)s) seen too early. 
Ignored.", - "adoption-agency-1.1": - "End tag (%(name)s) violates step 1, " - "paragraph 1 of the adoption agency algorithm.", - "adoption-agency-1.2": - "End tag (%(name)s) violates step 1, " - "paragraph 2 of the adoption agency algorithm.", - "adoption-agency-1.3": - "End tag (%(name)s) violates step 1, " - "paragraph 3 of the adoption agency algorithm.", - "adoption-agency-4.4": - "End tag (%(name)s) violates step 4, " - "paragraph 4 of the adoption agency algorithm.", - "unexpected-end-tag-treated-as": - "Unexpected end tag (%(originalName)s). Treated as %(newName)s.", - "no-end-tag": - "This element (%(name)s) has no end tag.", - "unexpected-implied-end-tag-in-table": - "Unexpected implied end tag (%(name)s) in the table phase.", - "unexpected-implied-end-tag-in-table-body": - "Unexpected implied end tag (%(name)s) in the table body phase.", - "unexpected-char-implies-table-voodoo": - "Unexpected non-space characters in " - "table context caused voodoo mode.", - "unexpected-hidden-input-in-table": - "Unexpected input with type hidden in table context.", - "unexpected-form-in-table": - "Unexpected form in table context.", - "unexpected-start-tag-implies-table-voodoo": - "Unexpected start tag (%(name)s) in " - "table context caused voodoo mode.", - "unexpected-end-tag-implies-table-voodoo": - "Unexpected end tag (%(name)s) in " - "table context caused voodoo mode.", - "unexpected-cell-in-table-body": - "Unexpected table cell start tag (%(name)s) " - "in the table body phase.", - "unexpected-cell-end-tag": - "Got table cell end tag (%(name)s) " - "while required end tags are missing.", - "unexpected-end-tag-in-table-body": - "Unexpected end tag (%(name)s) in the table body phase. Ignored.", - "unexpected-implied-end-tag-in-table-row": - "Unexpected implied end tag (%(name)s) in the table row phase.", - "unexpected-end-tag-in-table-row": - "Unexpected end tag (%(name)s) in the table row phase. Ignored.", - "unexpected-select-in-select": - "Unexpected select start tag in the select phase " - "treated as select end tag.", - "unexpected-input-in-select": - "Unexpected input start tag in the select phase.", - "unexpected-start-tag-in-select": - "Unexpected start tag token (%(name)s) in the select phase. " - "Ignored.", - "unexpected-end-tag-in-select": - "Unexpected end tag (%(name)s) in the select phase. Ignored.", - "unexpected-table-element-start-tag-in-select-in-table": - "Unexpected table element start tag (%(name)s) in the select in table phase.", - "unexpected-table-element-end-tag-in-select-in-table": - "Unexpected table element end tag (%(name)s) in the select in table phase.", - "unexpected-char-after-body": - "Unexpected non-space characters in the after body phase.", - "unexpected-start-tag-after-body": - "Unexpected start tag token (%(name)s)" - " in the after body phase.", - "unexpected-end-tag-after-body": - "Unexpected end tag token (%(name)s)" - " in the after body phase.", - "unexpected-char-in-frameset": - "Unexpected characters in the frameset phase. Characters ignored.", - "unexpected-start-tag-in-frameset": - "Unexpected start tag token (%(name)s)" - " in the frameset phase. Ignored.", - "unexpected-frameset-in-frameset-innerhtml": - "Unexpected end tag token (frameset) " - "in the frameset phase (innerHTML).", - "unexpected-end-tag-in-frameset": - "Unexpected end tag token (%(name)s)" - " in the frameset phase. Ignored.", - "unexpected-char-after-frameset": - "Unexpected non-space characters in the " - "after frameset phase. 
Ignored.", - "unexpected-start-tag-after-frameset": - "Unexpected start tag (%(name)s)" - " in the after frameset phase. Ignored.", - "unexpected-end-tag-after-frameset": - "Unexpected end tag (%(name)s)" - " in the after frameset phase. Ignored.", - "unexpected-end-tag-after-body-innerhtml": - "Unexpected end tag after body (innerHTML).", - "expected-eof-but-got-char": - "Unexpected non-space characters. Expected end of file.", - "expected-eof-but-got-start-tag": - "Unexpected start tag (%(name)s)" - ". Expected end of file.", - "expected-eof-but-got-end-tag": - "Unexpected end tag (%(name)s)" - ". Expected end of file.", - "eof-in-table": - "Unexpected end of file. Expected table content.", - "eof-in-select": - "Unexpected end of file. Expected select content.", - "eof-in-frameset": - "Unexpected end of file. Expected frameset content.", - "eof-in-script-in-script": - "Unexpected end of file. Expected script content.", - "eof-in-foreign-lands": - "Unexpected end of file. Expected foreign content.", - "non-void-element-with-trailing-solidus": - "Trailing solidus not allowed on element %(name)s.", - "unexpected-html-element-in-foreign-content": - "Element %(name)s not allowed in a non-html context.", - "unexpected-end-tag-before-html": - "Unexpected end tag (%(name)s) before html.", - "unexpected-inhead-noscript-tag": - "Element %(name)s not allowed in an inhead-noscript context.", - "eof-in-head-noscript": - "Unexpected end of file. Expected inhead-noscript content.", - "char-in-head-noscript": - "Unexpected non-space character. Expected inhead-noscript content.", - "XXX-undefined-error": - "Undefined error (this sucks and should be fixed)", -} - -namespaces = { - "html": "http://www.w3.org/1999/xhtml", - "mathml": "http://www.w3.org/1998/Math/MathML", - "svg": "http://www.w3.org/2000/svg", - "xlink": "http://www.w3.org/1999/xlink", - "xml": "http://www.w3.org/XML/1998/namespace", - "xmlns": "http://www.w3.org/2000/xmlns/" -} - -scopingElements = frozenset([ - (namespaces["html"], "applet"), - (namespaces["html"], "caption"), - (namespaces["html"], "html"), - (namespaces["html"], "marquee"), - (namespaces["html"], "object"), - (namespaces["html"], "table"), - (namespaces["html"], "td"), - (namespaces["html"], "th"), - (namespaces["mathml"], "mi"), - (namespaces["mathml"], "mo"), - (namespaces["mathml"], "mn"), - (namespaces["mathml"], "ms"), - (namespaces["mathml"], "mtext"), - (namespaces["mathml"], "annotation-xml"), - (namespaces["svg"], "foreignObject"), - (namespaces["svg"], "desc"), - (namespaces["svg"], "title"), -]) - -formattingElements = frozenset([ - (namespaces["html"], "a"), - (namespaces["html"], "b"), - (namespaces["html"], "big"), - (namespaces["html"], "code"), - (namespaces["html"], "em"), - (namespaces["html"], "font"), - (namespaces["html"], "i"), - (namespaces["html"], "nobr"), - (namespaces["html"], "s"), - (namespaces["html"], "small"), - (namespaces["html"], "strike"), - (namespaces["html"], "strong"), - (namespaces["html"], "tt"), - (namespaces["html"], "u") -]) - -specialElements = frozenset([ - (namespaces["html"], "address"), - (namespaces["html"], "applet"), - (namespaces["html"], "area"), - (namespaces["html"], "article"), - (namespaces["html"], "aside"), - (namespaces["html"], "base"), - (namespaces["html"], "basefont"), - (namespaces["html"], "bgsound"), - (namespaces["html"], "blockquote"), - (namespaces["html"], "body"), - (namespaces["html"], "br"), - (namespaces["html"], "button"), - (namespaces["html"], "caption"), - (namespaces["html"], "center"), - 
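    # Added note (not part of the upstream source): this set mirrors the HTML
    # spec's "special" element category; the in-body end-tag handling and the
    # adoption agency algorithm treat members of this set as hard boundaries,
    # which is why generic containers sit here alongside void elements.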
(namespaces["html"], "col"), - (namespaces["html"], "colgroup"), - (namespaces["html"], "command"), - (namespaces["html"], "dd"), - (namespaces["html"], "details"), - (namespaces["html"], "dir"), - (namespaces["html"], "div"), - (namespaces["html"], "dl"), - (namespaces["html"], "dt"), - (namespaces["html"], "embed"), - (namespaces["html"], "fieldset"), - (namespaces["html"], "figure"), - (namespaces["html"], "footer"), - (namespaces["html"], "form"), - (namespaces["html"], "frame"), - (namespaces["html"], "frameset"), - (namespaces["html"], "h1"), - (namespaces["html"], "h2"), - (namespaces["html"], "h3"), - (namespaces["html"], "h4"), - (namespaces["html"], "h5"), - (namespaces["html"], "h6"), - (namespaces["html"], "head"), - (namespaces["html"], "header"), - (namespaces["html"], "hr"), - (namespaces["html"], "html"), - (namespaces["html"], "iframe"), - # Note that image is commented out in the spec as "this isn't an - # element that can end up on the stack, so it doesn't matter," - (namespaces["html"], "image"), - (namespaces["html"], "img"), - (namespaces["html"], "input"), - (namespaces["html"], "isindex"), - (namespaces["html"], "li"), - (namespaces["html"], "link"), - (namespaces["html"], "listing"), - (namespaces["html"], "marquee"), - (namespaces["html"], "menu"), - (namespaces["html"], "meta"), - (namespaces["html"], "nav"), - (namespaces["html"], "noembed"), - (namespaces["html"], "noframes"), - (namespaces["html"], "noscript"), - (namespaces["html"], "object"), - (namespaces["html"], "ol"), - (namespaces["html"], "p"), - (namespaces["html"], "param"), - (namespaces["html"], "plaintext"), - (namespaces["html"], "pre"), - (namespaces["html"], "script"), - (namespaces["html"], "section"), - (namespaces["html"], "select"), - (namespaces["html"], "style"), - (namespaces["html"], "table"), - (namespaces["html"], "tbody"), - (namespaces["html"], "td"), - (namespaces["html"], "textarea"), - (namespaces["html"], "tfoot"), - (namespaces["html"], "th"), - (namespaces["html"], "thead"), - (namespaces["html"], "title"), - (namespaces["html"], "tr"), - (namespaces["html"], "ul"), - (namespaces["html"], "wbr"), - (namespaces["html"], "xmp"), - (namespaces["svg"], "foreignObject") -]) - -htmlIntegrationPointElements = frozenset([ - (namespaces["mathml"], "annotation-xml"), - (namespaces["svg"], "foreignObject"), - (namespaces["svg"], "desc"), - (namespaces["svg"], "title") -]) - -mathmlTextIntegrationPointElements = frozenset([ - (namespaces["mathml"], "mi"), - (namespaces["mathml"], "mo"), - (namespaces["mathml"], "mn"), - (namespaces["mathml"], "ms"), - (namespaces["mathml"], "mtext") -]) - -adjustSVGAttributes = { - "attributename": "attributeName", - "attributetype": "attributeType", - "basefrequency": "baseFrequency", - "baseprofile": "baseProfile", - "calcmode": "calcMode", - "clippathunits": "clipPathUnits", - "contentscripttype": "contentScriptType", - "contentstyletype": "contentStyleType", - "diffuseconstant": "diffuseConstant", - "edgemode": "edgeMode", - "externalresourcesrequired": "externalResourcesRequired", - "filterres": "filterRes", - "filterunits": "filterUnits", - "glyphref": "glyphRef", - "gradienttransform": "gradientTransform", - "gradientunits": "gradientUnits", - "kernelmatrix": "kernelMatrix", - "kernelunitlength": "kernelUnitLength", - "keypoints": "keyPoints", - "keysplines": "keySplines", - "keytimes": "keyTimes", - "lengthadjust": "lengthAdjust", - "limitingconeangle": "limitingConeAngle", - "markerheight": "markerHeight", - "markerunits": "markerUnits", - 
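    # Added note (not part of the upstream source): the tokenizer lowercases
    # every attribute name, so this map lets the tree builder restore SVG's
    # camelCase spellings in foreign content. For example, the attribute in
    # '<svg viewBox="0 0 1 1">' comes out of the tokenizer as "viewbox" and
    # is adjusted back to "viewBox" via this table.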
"markerwidth": "markerWidth", - "maskcontentunits": "maskContentUnits", - "maskunits": "maskUnits", - "numoctaves": "numOctaves", - "pathlength": "pathLength", - "patterncontentunits": "patternContentUnits", - "patterntransform": "patternTransform", - "patternunits": "patternUnits", - "pointsatx": "pointsAtX", - "pointsaty": "pointsAtY", - "pointsatz": "pointsAtZ", - "preservealpha": "preserveAlpha", - "preserveaspectratio": "preserveAspectRatio", - "primitiveunits": "primitiveUnits", - "refx": "refX", - "refy": "refY", - "repeatcount": "repeatCount", - "repeatdur": "repeatDur", - "requiredextensions": "requiredExtensions", - "requiredfeatures": "requiredFeatures", - "specularconstant": "specularConstant", - "specularexponent": "specularExponent", - "spreadmethod": "spreadMethod", - "startoffset": "startOffset", - "stddeviation": "stdDeviation", - "stitchtiles": "stitchTiles", - "surfacescale": "surfaceScale", - "systemlanguage": "systemLanguage", - "tablevalues": "tableValues", - "targetx": "targetX", - "targety": "targetY", - "textlength": "textLength", - "viewbox": "viewBox", - "viewtarget": "viewTarget", - "xchannelselector": "xChannelSelector", - "ychannelselector": "yChannelSelector", - "zoomandpan": "zoomAndPan" -} - -adjustMathMLAttributes = {"definitionurl": "definitionURL"} - -adjustForeignAttributes = { - "xlink:actuate": ("xlink", "actuate", namespaces["xlink"]), - "xlink:arcrole": ("xlink", "arcrole", namespaces["xlink"]), - "xlink:href": ("xlink", "href", namespaces["xlink"]), - "xlink:role": ("xlink", "role", namespaces["xlink"]), - "xlink:show": ("xlink", "show", namespaces["xlink"]), - "xlink:title": ("xlink", "title", namespaces["xlink"]), - "xlink:type": ("xlink", "type", namespaces["xlink"]), - "xml:base": ("xml", "base", namespaces["xml"]), - "xml:lang": ("xml", "lang", namespaces["xml"]), - "xml:space": ("xml", "space", namespaces["xml"]), - "xmlns": (None, "xmlns", namespaces["xmlns"]), - "xmlns:xlink": ("xmlns", "xlink", namespaces["xmlns"]) -} - -unadjustForeignAttributes = {(ns, local): qname for qname, (prefix, local, ns) in - adjustForeignAttributes.items()} - -spaceCharacters = frozenset([ - "\t", - "\n", - "\u000C", - " ", - "\r" -]) - -tableInsertModeElements = frozenset([ - "table", - "tbody", - "tfoot", - "thead", - "tr" -]) - -asciiLowercase = frozenset(string.ascii_lowercase) -asciiUppercase = frozenset(string.ascii_uppercase) -asciiLetters = frozenset(string.ascii_letters) -digits = frozenset(string.digits) -hexDigits = frozenset(string.hexdigits) - -asciiUpper2Lower = {ord(c): ord(c.lower()) for c in string.ascii_uppercase} - -# Heading elements need to be ordered -headingElements = ( - "h1", - "h2", - "h3", - "h4", - "h5", - "h6" -) - -voidElements = frozenset([ - "base", - "command", - "event-source", - "link", - "meta", - "hr", - "br", - "img", - "embed", - "param", - "area", - "col", - "input", - "source", - "track" -]) - -cdataElements = frozenset(['title', 'textarea']) - -rcdataElements = frozenset([ - 'style', - 'script', - 'xmp', - 'iframe', - 'noembed', - 'noframes', - 'noscript' -]) - -booleanAttributes = { - "": frozenset(["irrelevant", "itemscope"]), - "style": frozenset(["scoped"]), - "img": frozenset(["ismap"]), - "audio": frozenset(["autoplay", "controls"]), - "video": frozenset(["autoplay", "controls"]), - "script": frozenset(["defer", "async"]), - "details": frozenset(["open"]), - "datagrid": frozenset(["multiple", "disabled"]), - "command": frozenset(["hidden", "disabled", "checked", "default"]), - "hr": frozenset(["noshade"]), - 
"menu": frozenset(["autosubmit"]), - "fieldset": frozenset(["disabled", "readonly"]), - "option": frozenset(["disabled", "readonly", "selected"]), - "optgroup": frozenset(["disabled", "readonly"]), - "button": frozenset(["disabled", "autofocus"]), - "input": frozenset(["disabled", "readonly", "required", "autofocus", "checked", "ismap"]), - "select": frozenset(["disabled", "readonly", "autofocus", "multiple"]), - "output": frozenset(["disabled", "readonly"]), - "iframe": frozenset(["seamless"]), -} - -# entitiesWindows1252 has to be _ordered_ and needs to have an index. It -# therefore can't be a frozenset. -entitiesWindows1252 = ( - 8364, # 0x80 0x20AC EURO SIGN - 65533, # 0x81 UNDEFINED - 8218, # 0x82 0x201A SINGLE LOW-9 QUOTATION MARK - 402, # 0x83 0x0192 LATIN SMALL LETTER F WITH HOOK - 8222, # 0x84 0x201E DOUBLE LOW-9 QUOTATION MARK - 8230, # 0x85 0x2026 HORIZONTAL ELLIPSIS - 8224, # 0x86 0x2020 DAGGER - 8225, # 0x87 0x2021 DOUBLE DAGGER - 710, # 0x88 0x02C6 MODIFIER LETTER CIRCUMFLEX ACCENT - 8240, # 0x89 0x2030 PER MILLE SIGN - 352, # 0x8A 0x0160 LATIN CAPITAL LETTER S WITH CARON - 8249, # 0x8B 0x2039 SINGLE LEFT-POINTING ANGLE QUOTATION MARK - 338, # 0x8C 0x0152 LATIN CAPITAL LIGATURE OE - 65533, # 0x8D UNDEFINED - 381, # 0x8E 0x017D LATIN CAPITAL LETTER Z WITH CARON - 65533, # 0x8F UNDEFINED - 65533, # 0x90 UNDEFINED - 8216, # 0x91 0x2018 LEFT SINGLE QUOTATION MARK - 8217, # 0x92 0x2019 RIGHT SINGLE QUOTATION MARK - 8220, # 0x93 0x201C LEFT DOUBLE QUOTATION MARK - 8221, # 0x94 0x201D RIGHT DOUBLE QUOTATION MARK - 8226, # 0x95 0x2022 BULLET - 8211, # 0x96 0x2013 EN DASH - 8212, # 0x97 0x2014 EM DASH - 732, # 0x98 0x02DC SMALL TILDE - 8482, # 0x99 0x2122 TRADE MARK SIGN - 353, # 0x9A 0x0161 LATIN SMALL LETTER S WITH CARON - 8250, # 0x9B 0x203A SINGLE RIGHT-POINTING ANGLE QUOTATION MARK - 339, # 0x9C 0x0153 LATIN SMALL LIGATURE OE - 65533, # 0x9D UNDEFINED - 382, # 0x9E 0x017E LATIN SMALL LETTER Z WITH CARON - 376 # 0x9F 0x0178 LATIN CAPITAL LETTER Y WITH DIAERESIS -) - -xmlEntities = frozenset(['lt;', 'gt;', 'amp;', 'apos;', 'quot;']) - -entities = { - "AElig": "\xc6", - "AElig;": "\xc6", - "AMP": "&", - "AMP;": "&", - "Aacute": "\xc1", - "Aacute;": "\xc1", - "Abreve;": "\u0102", - "Acirc": "\xc2", - "Acirc;": "\xc2", - "Acy;": "\u0410", - "Afr;": "\U0001d504", - "Agrave": "\xc0", - "Agrave;": "\xc0", - "Alpha;": "\u0391", - "Amacr;": "\u0100", - "And;": "\u2a53", - "Aogon;": "\u0104", - "Aopf;": "\U0001d538", - "ApplyFunction;": "\u2061", - "Aring": "\xc5", - "Aring;": "\xc5", - "Ascr;": "\U0001d49c", - "Assign;": "\u2254", - "Atilde": "\xc3", - "Atilde;": "\xc3", - "Auml": "\xc4", - "Auml;": "\xc4", - "Backslash;": "\u2216", - "Barv;": "\u2ae7", - "Barwed;": "\u2306", - "Bcy;": "\u0411", - "Because;": "\u2235", - "Bernoullis;": "\u212c", - "Beta;": "\u0392", - "Bfr;": "\U0001d505", - "Bopf;": "\U0001d539", - "Breve;": "\u02d8", - "Bscr;": "\u212c", - "Bumpeq;": "\u224e", - "CHcy;": "\u0427", - "COPY": "\xa9", - "COPY;": "\xa9", - "Cacute;": "\u0106", - "Cap;": "\u22d2", - "CapitalDifferentialD;": "\u2145", - "Cayleys;": "\u212d", - "Ccaron;": "\u010c", - "Ccedil": "\xc7", - "Ccedil;": "\xc7", - "Ccirc;": "\u0108", - "Cconint;": "\u2230", - "Cdot;": "\u010a", - "Cedilla;": "\xb8", - "CenterDot;": "\xb7", - "Cfr;": "\u212d", - "Chi;": "\u03a7", - "CircleDot;": "\u2299", - "CircleMinus;": "\u2296", - "CirclePlus;": "\u2295", - "CircleTimes;": "\u2297", - "ClockwiseContourIntegral;": "\u2232", - "CloseCurlyDoubleQuote;": "\u201d", - "CloseCurlyQuote;": "\u2019", - "Colon;": "\u2237", 
- "Colone;": "\u2a74", - "Congruent;": "\u2261", - "Conint;": "\u222f", - "ContourIntegral;": "\u222e", - "Copf;": "\u2102", - "Coproduct;": "\u2210", - "CounterClockwiseContourIntegral;": "\u2233", - "Cross;": "\u2a2f", - "Cscr;": "\U0001d49e", - "Cup;": "\u22d3", - "CupCap;": "\u224d", - "DD;": "\u2145", - "DDotrahd;": "\u2911", - "DJcy;": "\u0402", - "DScy;": "\u0405", - "DZcy;": "\u040f", - "Dagger;": "\u2021", - "Darr;": "\u21a1", - "Dashv;": "\u2ae4", - "Dcaron;": "\u010e", - "Dcy;": "\u0414", - "Del;": "\u2207", - "Delta;": "\u0394", - "Dfr;": "\U0001d507", - "DiacriticalAcute;": "\xb4", - "DiacriticalDot;": "\u02d9", - "DiacriticalDoubleAcute;": "\u02dd", - "DiacriticalGrave;": "`", - "DiacriticalTilde;": "\u02dc", - "Diamond;": "\u22c4", - "DifferentialD;": "\u2146", - "Dopf;": "\U0001d53b", - "Dot;": "\xa8", - "DotDot;": "\u20dc", - "DotEqual;": "\u2250", - "DoubleContourIntegral;": "\u222f", - "DoubleDot;": "\xa8", - "DoubleDownArrow;": "\u21d3", - "DoubleLeftArrow;": "\u21d0", - "DoubleLeftRightArrow;": "\u21d4", - "DoubleLeftTee;": "\u2ae4", - "DoubleLongLeftArrow;": "\u27f8", - "DoubleLongLeftRightArrow;": "\u27fa", - "DoubleLongRightArrow;": "\u27f9", - "DoubleRightArrow;": "\u21d2", - "DoubleRightTee;": "\u22a8", - "DoubleUpArrow;": "\u21d1", - "DoubleUpDownArrow;": "\u21d5", - "DoubleVerticalBar;": "\u2225", - "DownArrow;": "\u2193", - "DownArrowBar;": "\u2913", - "DownArrowUpArrow;": "\u21f5", - "DownBreve;": "\u0311", - "DownLeftRightVector;": "\u2950", - "DownLeftTeeVector;": "\u295e", - "DownLeftVector;": "\u21bd", - "DownLeftVectorBar;": "\u2956", - "DownRightTeeVector;": "\u295f", - "DownRightVector;": "\u21c1", - "DownRightVectorBar;": "\u2957", - "DownTee;": "\u22a4", - "DownTeeArrow;": "\u21a7", - "Downarrow;": "\u21d3", - "Dscr;": "\U0001d49f", - "Dstrok;": "\u0110", - "ENG;": "\u014a", - "ETH": "\xd0", - "ETH;": "\xd0", - "Eacute": "\xc9", - "Eacute;": "\xc9", - "Ecaron;": "\u011a", - "Ecirc": "\xca", - "Ecirc;": "\xca", - "Ecy;": "\u042d", - "Edot;": "\u0116", - "Efr;": "\U0001d508", - "Egrave": "\xc8", - "Egrave;": "\xc8", - "Element;": "\u2208", - "Emacr;": "\u0112", - "EmptySmallSquare;": "\u25fb", - "EmptyVerySmallSquare;": "\u25ab", - "Eogon;": "\u0118", - "Eopf;": "\U0001d53c", - "Epsilon;": "\u0395", - "Equal;": "\u2a75", - "EqualTilde;": "\u2242", - "Equilibrium;": "\u21cc", - "Escr;": "\u2130", - "Esim;": "\u2a73", - "Eta;": "\u0397", - "Euml": "\xcb", - "Euml;": "\xcb", - "Exists;": "\u2203", - "ExponentialE;": "\u2147", - "Fcy;": "\u0424", - "Ffr;": "\U0001d509", - "FilledSmallSquare;": "\u25fc", - "FilledVerySmallSquare;": "\u25aa", - "Fopf;": "\U0001d53d", - "ForAll;": "\u2200", - "Fouriertrf;": "\u2131", - "Fscr;": "\u2131", - "GJcy;": "\u0403", - "GT": ">", - "GT;": ">", - "Gamma;": "\u0393", - "Gammad;": "\u03dc", - "Gbreve;": "\u011e", - "Gcedil;": "\u0122", - "Gcirc;": "\u011c", - "Gcy;": "\u0413", - "Gdot;": "\u0120", - "Gfr;": "\U0001d50a", - "Gg;": "\u22d9", - "Gopf;": "\U0001d53e", - "GreaterEqual;": "\u2265", - "GreaterEqualLess;": "\u22db", - "GreaterFullEqual;": "\u2267", - "GreaterGreater;": "\u2aa2", - "GreaterLess;": "\u2277", - "GreaterSlantEqual;": "\u2a7e", - "GreaterTilde;": "\u2273", - "Gscr;": "\U0001d4a2", - "Gt;": "\u226b", - "HARDcy;": "\u042a", - "Hacek;": "\u02c7", - "Hat;": "^", - "Hcirc;": "\u0124", - "Hfr;": "\u210c", - "HilbertSpace;": "\u210b", - "Hopf;": "\u210d", - "HorizontalLine;": "\u2500", - "Hscr;": "\u210b", - "Hstrok;": "\u0126", - "HumpDownHump;": "\u224e", - "HumpEqual;": "\u224f", - "IEcy;": "\u0415", - 
"IJlig;": "\u0132", - "IOcy;": "\u0401", - "Iacute": "\xcd", - "Iacute;": "\xcd", - "Icirc": "\xce", - "Icirc;": "\xce", - "Icy;": "\u0418", - "Idot;": "\u0130", - "Ifr;": "\u2111", - "Igrave": "\xcc", - "Igrave;": "\xcc", - "Im;": "\u2111", - "Imacr;": "\u012a", - "ImaginaryI;": "\u2148", - "Implies;": "\u21d2", - "Int;": "\u222c", - "Integral;": "\u222b", - "Intersection;": "\u22c2", - "InvisibleComma;": "\u2063", - "InvisibleTimes;": "\u2062", - "Iogon;": "\u012e", - "Iopf;": "\U0001d540", - "Iota;": "\u0399", - "Iscr;": "\u2110", - "Itilde;": "\u0128", - "Iukcy;": "\u0406", - "Iuml": "\xcf", - "Iuml;": "\xcf", - "Jcirc;": "\u0134", - "Jcy;": "\u0419", - "Jfr;": "\U0001d50d", - "Jopf;": "\U0001d541", - "Jscr;": "\U0001d4a5", - "Jsercy;": "\u0408", - "Jukcy;": "\u0404", - "KHcy;": "\u0425", - "KJcy;": "\u040c", - "Kappa;": "\u039a", - "Kcedil;": "\u0136", - "Kcy;": "\u041a", - "Kfr;": "\U0001d50e", - "Kopf;": "\U0001d542", - "Kscr;": "\U0001d4a6", - "LJcy;": "\u0409", - "LT": "<", - "LT;": "<", - "Lacute;": "\u0139", - "Lambda;": "\u039b", - "Lang;": "\u27ea", - "Laplacetrf;": "\u2112", - "Larr;": "\u219e", - "Lcaron;": "\u013d", - "Lcedil;": "\u013b", - "Lcy;": "\u041b", - "LeftAngleBracket;": "\u27e8", - "LeftArrow;": "\u2190", - "LeftArrowBar;": "\u21e4", - "LeftArrowRightArrow;": "\u21c6", - "LeftCeiling;": "\u2308", - "LeftDoubleBracket;": "\u27e6", - "LeftDownTeeVector;": "\u2961", - "LeftDownVector;": "\u21c3", - "LeftDownVectorBar;": "\u2959", - "LeftFloor;": "\u230a", - "LeftRightArrow;": "\u2194", - "LeftRightVector;": "\u294e", - "LeftTee;": "\u22a3", - "LeftTeeArrow;": "\u21a4", - "LeftTeeVector;": "\u295a", - "LeftTriangle;": "\u22b2", - "LeftTriangleBar;": "\u29cf", - "LeftTriangleEqual;": "\u22b4", - "LeftUpDownVector;": "\u2951", - "LeftUpTeeVector;": "\u2960", - "LeftUpVector;": "\u21bf", - "LeftUpVectorBar;": "\u2958", - "LeftVector;": "\u21bc", - "LeftVectorBar;": "\u2952", - "Leftarrow;": "\u21d0", - "Leftrightarrow;": "\u21d4", - "LessEqualGreater;": "\u22da", - "LessFullEqual;": "\u2266", - "LessGreater;": "\u2276", - "LessLess;": "\u2aa1", - "LessSlantEqual;": "\u2a7d", - "LessTilde;": "\u2272", - "Lfr;": "\U0001d50f", - "Ll;": "\u22d8", - "Lleftarrow;": "\u21da", - "Lmidot;": "\u013f", - "LongLeftArrow;": "\u27f5", - "LongLeftRightArrow;": "\u27f7", - "LongRightArrow;": "\u27f6", - "Longleftarrow;": "\u27f8", - "Longleftrightarrow;": "\u27fa", - "Longrightarrow;": "\u27f9", - "Lopf;": "\U0001d543", - "LowerLeftArrow;": "\u2199", - "LowerRightArrow;": "\u2198", - "Lscr;": "\u2112", - "Lsh;": "\u21b0", - "Lstrok;": "\u0141", - "Lt;": "\u226a", - "Map;": "\u2905", - "Mcy;": "\u041c", - "MediumSpace;": "\u205f", - "Mellintrf;": "\u2133", - "Mfr;": "\U0001d510", - "MinusPlus;": "\u2213", - "Mopf;": "\U0001d544", - "Mscr;": "\u2133", - "Mu;": "\u039c", - "NJcy;": "\u040a", - "Nacute;": "\u0143", - "Ncaron;": "\u0147", - "Ncedil;": "\u0145", - "Ncy;": "\u041d", - "NegativeMediumSpace;": "\u200b", - "NegativeThickSpace;": "\u200b", - "NegativeThinSpace;": "\u200b", - "NegativeVeryThinSpace;": "\u200b", - "NestedGreaterGreater;": "\u226b", - "NestedLessLess;": "\u226a", - "NewLine;": "\n", - "Nfr;": "\U0001d511", - "NoBreak;": "\u2060", - "NonBreakingSpace;": "\xa0", - "Nopf;": "\u2115", - "Not;": "\u2aec", - "NotCongruent;": "\u2262", - "NotCupCap;": "\u226d", - "NotDoubleVerticalBar;": "\u2226", - "NotElement;": "\u2209", - "NotEqual;": "\u2260", - "NotEqualTilde;": "\u2242\u0338", - "NotExists;": "\u2204", - "NotGreater;": "\u226f", - "NotGreaterEqual;": "\u2271", - 
"NotGreaterFullEqual;": "\u2267\u0338", - "NotGreaterGreater;": "\u226b\u0338", - "NotGreaterLess;": "\u2279", - "NotGreaterSlantEqual;": "\u2a7e\u0338", - "NotGreaterTilde;": "\u2275", - "NotHumpDownHump;": "\u224e\u0338", - "NotHumpEqual;": "\u224f\u0338", - "NotLeftTriangle;": "\u22ea", - "NotLeftTriangleBar;": "\u29cf\u0338", - "NotLeftTriangleEqual;": "\u22ec", - "NotLess;": "\u226e", - "NotLessEqual;": "\u2270", - "NotLessGreater;": "\u2278", - "NotLessLess;": "\u226a\u0338", - "NotLessSlantEqual;": "\u2a7d\u0338", - "NotLessTilde;": "\u2274", - "NotNestedGreaterGreater;": "\u2aa2\u0338", - "NotNestedLessLess;": "\u2aa1\u0338", - "NotPrecedes;": "\u2280", - "NotPrecedesEqual;": "\u2aaf\u0338", - "NotPrecedesSlantEqual;": "\u22e0", - "NotReverseElement;": "\u220c", - "NotRightTriangle;": "\u22eb", - "NotRightTriangleBar;": "\u29d0\u0338", - "NotRightTriangleEqual;": "\u22ed", - "NotSquareSubset;": "\u228f\u0338", - "NotSquareSubsetEqual;": "\u22e2", - "NotSquareSuperset;": "\u2290\u0338", - "NotSquareSupersetEqual;": "\u22e3", - "NotSubset;": "\u2282\u20d2", - "NotSubsetEqual;": "\u2288", - "NotSucceeds;": "\u2281", - "NotSucceedsEqual;": "\u2ab0\u0338", - "NotSucceedsSlantEqual;": "\u22e1", - "NotSucceedsTilde;": "\u227f\u0338", - "NotSuperset;": "\u2283\u20d2", - "NotSupersetEqual;": "\u2289", - "NotTilde;": "\u2241", - "NotTildeEqual;": "\u2244", - "NotTildeFullEqual;": "\u2247", - "NotTildeTilde;": "\u2249", - "NotVerticalBar;": "\u2224", - "Nscr;": "\U0001d4a9", - "Ntilde": "\xd1", - "Ntilde;": "\xd1", - "Nu;": "\u039d", - "OElig;": "\u0152", - "Oacute": "\xd3", - "Oacute;": "\xd3", - "Ocirc": "\xd4", - "Ocirc;": "\xd4", - "Ocy;": "\u041e", - "Odblac;": "\u0150", - "Ofr;": "\U0001d512", - "Ograve": "\xd2", - "Ograve;": "\xd2", - "Omacr;": "\u014c", - "Omega;": "\u03a9", - "Omicron;": "\u039f", - "Oopf;": "\U0001d546", - "OpenCurlyDoubleQuote;": "\u201c", - "OpenCurlyQuote;": "\u2018", - "Or;": "\u2a54", - "Oscr;": "\U0001d4aa", - "Oslash": "\xd8", - "Oslash;": "\xd8", - "Otilde": "\xd5", - "Otilde;": "\xd5", - "Otimes;": "\u2a37", - "Ouml": "\xd6", - "Ouml;": "\xd6", - "OverBar;": "\u203e", - "OverBrace;": "\u23de", - "OverBracket;": "\u23b4", - "OverParenthesis;": "\u23dc", - "PartialD;": "\u2202", - "Pcy;": "\u041f", - "Pfr;": "\U0001d513", - "Phi;": "\u03a6", - "Pi;": "\u03a0", - "PlusMinus;": "\xb1", - "Poincareplane;": "\u210c", - "Popf;": "\u2119", - "Pr;": "\u2abb", - "Precedes;": "\u227a", - "PrecedesEqual;": "\u2aaf", - "PrecedesSlantEqual;": "\u227c", - "PrecedesTilde;": "\u227e", - "Prime;": "\u2033", - "Product;": "\u220f", - "Proportion;": "\u2237", - "Proportional;": "\u221d", - "Pscr;": "\U0001d4ab", - "Psi;": "\u03a8", - "QUOT": "\"", - "QUOT;": "\"", - "Qfr;": "\U0001d514", - "Qopf;": "\u211a", - "Qscr;": "\U0001d4ac", - "RBarr;": "\u2910", - "REG": "\xae", - "REG;": "\xae", - "Racute;": "\u0154", - "Rang;": "\u27eb", - "Rarr;": "\u21a0", - "Rarrtl;": "\u2916", - "Rcaron;": "\u0158", - "Rcedil;": "\u0156", - "Rcy;": "\u0420", - "Re;": "\u211c", - "ReverseElement;": "\u220b", - "ReverseEquilibrium;": "\u21cb", - "ReverseUpEquilibrium;": "\u296f", - "Rfr;": "\u211c", - "Rho;": "\u03a1", - "RightAngleBracket;": "\u27e9", - "RightArrow;": "\u2192", - "RightArrowBar;": "\u21e5", - "RightArrowLeftArrow;": "\u21c4", - "RightCeiling;": "\u2309", - "RightDoubleBracket;": "\u27e7", - "RightDownTeeVector;": "\u295d", - "RightDownVector;": "\u21c2", - "RightDownVectorBar;": "\u2955", - "RightFloor;": "\u230b", - "RightTee;": "\u22a2", - "RightTeeArrow;": "\u21a6", - 
"RightTeeVector;": "\u295b", - "RightTriangle;": "\u22b3", - "RightTriangleBar;": "\u29d0", - "RightTriangleEqual;": "\u22b5", - "RightUpDownVector;": "\u294f", - "RightUpTeeVector;": "\u295c", - "RightUpVector;": "\u21be", - "RightUpVectorBar;": "\u2954", - "RightVector;": "\u21c0", - "RightVectorBar;": "\u2953", - "Rightarrow;": "\u21d2", - "Ropf;": "\u211d", - "RoundImplies;": "\u2970", - "Rrightarrow;": "\u21db", - "Rscr;": "\u211b", - "Rsh;": "\u21b1", - "RuleDelayed;": "\u29f4", - "SHCHcy;": "\u0429", - "SHcy;": "\u0428", - "SOFTcy;": "\u042c", - "Sacute;": "\u015a", - "Sc;": "\u2abc", - "Scaron;": "\u0160", - "Scedil;": "\u015e", - "Scirc;": "\u015c", - "Scy;": "\u0421", - "Sfr;": "\U0001d516", - "ShortDownArrow;": "\u2193", - "ShortLeftArrow;": "\u2190", - "ShortRightArrow;": "\u2192", - "ShortUpArrow;": "\u2191", - "Sigma;": "\u03a3", - "SmallCircle;": "\u2218", - "Sopf;": "\U0001d54a", - "Sqrt;": "\u221a", - "Square;": "\u25a1", - "SquareIntersection;": "\u2293", - "SquareSubset;": "\u228f", - "SquareSubsetEqual;": "\u2291", - "SquareSuperset;": "\u2290", - "SquareSupersetEqual;": "\u2292", - "SquareUnion;": "\u2294", - "Sscr;": "\U0001d4ae", - "Star;": "\u22c6", - "Sub;": "\u22d0", - "Subset;": "\u22d0", - "SubsetEqual;": "\u2286", - "Succeeds;": "\u227b", - "SucceedsEqual;": "\u2ab0", - "SucceedsSlantEqual;": "\u227d", - "SucceedsTilde;": "\u227f", - "SuchThat;": "\u220b", - "Sum;": "\u2211", - "Sup;": "\u22d1", - "Superset;": "\u2283", - "SupersetEqual;": "\u2287", - "Supset;": "\u22d1", - "THORN": "\xde", - "THORN;": "\xde", - "TRADE;": "\u2122", - "TSHcy;": "\u040b", - "TScy;": "\u0426", - "Tab;": "\t", - "Tau;": "\u03a4", - "Tcaron;": "\u0164", - "Tcedil;": "\u0162", - "Tcy;": "\u0422", - "Tfr;": "\U0001d517", - "Therefore;": "\u2234", - "Theta;": "\u0398", - "ThickSpace;": "\u205f\u200a", - "ThinSpace;": "\u2009", - "Tilde;": "\u223c", - "TildeEqual;": "\u2243", - "TildeFullEqual;": "\u2245", - "TildeTilde;": "\u2248", - "Topf;": "\U0001d54b", - "TripleDot;": "\u20db", - "Tscr;": "\U0001d4af", - "Tstrok;": "\u0166", - "Uacute": "\xda", - "Uacute;": "\xda", - "Uarr;": "\u219f", - "Uarrocir;": "\u2949", - "Ubrcy;": "\u040e", - "Ubreve;": "\u016c", - "Ucirc": "\xdb", - "Ucirc;": "\xdb", - "Ucy;": "\u0423", - "Udblac;": "\u0170", - "Ufr;": "\U0001d518", - "Ugrave": "\xd9", - "Ugrave;": "\xd9", - "Umacr;": "\u016a", - "UnderBar;": "_", - "UnderBrace;": "\u23df", - "UnderBracket;": "\u23b5", - "UnderParenthesis;": "\u23dd", - "Union;": "\u22c3", - "UnionPlus;": "\u228e", - "Uogon;": "\u0172", - "Uopf;": "\U0001d54c", - "UpArrow;": "\u2191", - "UpArrowBar;": "\u2912", - "UpArrowDownArrow;": "\u21c5", - "UpDownArrow;": "\u2195", - "UpEquilibrium;": "\u296e", - "UpTee;": "\u22a5", - "UpTeeArrow;": "\u21a5", - "Uparrow;": "\u21d1", - "Updownarrow;": "\u21d5", - "UpperLeftArrow;": "\u2196", - "UpperRightArrow;": "\u2197", - "Upsi;": "\u03d2", - "Upsilon;": "\u03a5", - "Uring;": "\u016e", - "Uscr;": "\U0001d4b0", - "Utilde;": "\u0168", - "Uuml": "\xdc", - "Uuml;": "\xdc", - "VDash;": "\u22ab", - "Vbar;": "\u2aeb", - "Vcy;": "\u0412", - "Vdash;": "\u22a9", - "Vdashl;": "\u2ae6", - "Vee;": "\u22c1", - "Verbar;": "\u2016", - "Vert;": "\u2016", - "VerticalBar;": "\u2223", - "VerticalLine;": "|", - "VerticalSeparator;": "\u2758", - "VerticalTilde;": "\u2240", - "VeryThinSpace;": "\u200a", - "Vfr;": "\U0001d519", - "Vopf;": "\U0001d54d", - "Vscr;": "\U0001d4b1", - "Vvdash;": "\u22aa", - "Wcirc;": "\u0174", - "Wedge;": "\u22c0", - "Wfr;": "\U0001d51a", - "Wopf;": "\U0001d54e", - "Wscr;": 
"\U0001d4b2", - "Xfr;": "\U0001d51b", - "Xi;": "\u039e", - "Xopf;": "\U0001d54f", - "Xscr;": "\U0001d4b3", - "YAcy;": "\u042f", - "YIcy;": "\u0407", - "YUcy;": "\u042e", - "Yacute": "\xdd", - "Yacute;": "\xdd", - "Ycirc;": "\u0176", - "Ycy;": "\u042b", - "Yfr;": "\U0001d51c", - "Yopf;": "\U0001d550", - "Yscr;": "\U0001d4b4", - "Yuml;": "\u0178", - "ZHcy;": "\u0416", - "Zacute;": "\u0179", - "Zcaron;": "\u017d", - "Zcy;": "\u0417", - "Zdot;": "\u017b", - "ZeroWidthSpace;": "\u200b", - "Zeta;": "\u0396", - "Zfr;": "\u2128", - "Zopf;": "\u2124", - "Zscr;": "\U0001d4b5", - "aacute": "\xe1", - "aacute;": "\xe1", - "abreve;": "\u0103", - "ac;": "\u223e", - "acE;": "\u223e\u0333", - "acd;": "\u223f", - "acirc": "\xe2", - "acirc;": "\xe2", - "acute": "\xb4", - "acute;": "\xb4", - "acy;": "\u0430", - "aelig": "\xe6", - "aelig;": "\xe6", - "af;": "\u2061", - "afr;": "\U0001d51e", - "agrave": "\xe0", - "agrave;": "\xe0", - "alefsym;": "\u2135", - "aleph;": "\u2135", - "alpha;": "\u03b1", - "amacr;": "\u0101", - "amalg;": "\u2a3f", - "amp": "&", - "amp;": "&", - "and;": "\u2227", - "andand;": "\u2a55", - "andd;": "\u2a5c", - "andslope;": "\u2a58", - "andv;": "\u2a5a", - "ang;": "\u2220", - "ange;": "\u29a4", - "angle;": "\u2220", - "angmsd;": "\u2221", - "angmsdaa;": "\u29a8", - "angmsdab;": "\u29a9", - "angmsdac;": "\u29aa", - "angmsdad;": "\u29ab", - "angmsdae;": "\u29ac", - "angmsdaf;": "\u29ad", - "angmsdag;": "\u29ae", - "angmsdah;": "\u29af", - "angrt;": "\u221f", - "angrtvb;": "\u22be", - "angrtvbd;": "\u299d", - "angsph;": "\u2222", - "angst;": "\xc5", - "angzarr;": "\u237c", - "aogon;": "\u0105", - "aopf;": "\U0001d552", - "ap;": "\u2248", - "apE;": "\u2a70", - "apacir;": "\u2a6f", - "ape;": "\u224a", - "apid;": "\u224b", - "apos;": "'", - "approx;": "\u2248", - "approxeq;": "\u224a", - "aring": "\xe5", - "aring;": "\xe5", - "ascr;": "\U0001d4b6", - "ast;": "*", - "asymp;": "\u2248", - "asympeq;": "\u224d", - "atilde": "\xe3", - "atilde;": "\xe3", - "auml": "\xe4", - "auml;": "\xe4", - "awconint;": "\u2233", - "awint;": "\u2a11", - "bNot;": "\u2aed", - "backcong;": "\u224c", - "backepsilon;": "\u03f6", - "backprime;": "\u2035", - "backsim;": "\u223d", - "backsimeq;": "\u22cd", - "barvee;": "\u22bd", - "barwed;": "\u2305", - "barwedge;": "\u2305", - "bbrk;": "\u23b5", - "bbrktbrk;": "\u23b6", - "bcong;": "\u224c", - "bcy;": "\u0431", - "bdquo;": "\u201e", - "becaus;": "\u2235", - "because;": "\u2235", - "bemptyv;": "\u29b0", - "bepsi;": "\u03f6", - "bernou;": "\u212c", - "beta;": "\u03b2", - "beth;": "\u2136", - "between;": "\u226c", - "bfr;": "\U0001d51f", - "bigcap;": "\u22c2", - "bigcirc;": "\u25ef", - "bigcup;": "\u22c3", - "bigodot;": "\u2a00", - "bigoplus;": "\u2a01", - "bigotimes;": "\u2a02", - "bigsqcup;": "\u2a06", - "bigstar;": "\u2605", - "bigtriangledown;": "\u25bd", - "bigtriangleup;": "\u25b3", - "biguplus;": "\u2a04", - "bigvee;": "\u22c1", - "bigwedge;": "\u22c0", - "bkarow;": "\u290d", - "blacklozenge;": "\u29eb", - "blacksquare;": "\u25aa", - "blacktriangle;": "\u25b4", - "blacktriangledown;": "\u25be", - "blacktriangleleft;": "\u25c2", - "blacktriangleright;": "\u25b8", - "blank;": "\u2423", - "blk12;": "\u2592", - "blk14;": "\u2591", - "blk34;": "\u2593", - "block;": "\u2588", - "bne;": "=\u20e5", - "bnequiv;": "\u2261\u20e5", - "bnot;": "\u2310", - "bopf;": "\U0001d553", - "bot;": "\u22a5", - "bottom;": "\u22a5", - "bowtie;": "\u22c8", - "boxDL;": "\u2557", - "boxDR;": "\u2554", - "boxDl;": "\u2556", - "boxDr;": "\u2553", - "boxH;": "\u2550", - "boxHD;": "\u2566", - 
"boxHU;": "\u2569", - "boxHd;": "\u2564", - "boxHu;": "\u2567", - "boxUL;": "\u255d", - "boxUR;": "\u255a", - "boxUl;": "\u255c", - "boxUr;": "\u2559", - "boxV;": "\u2551", - "boxVH;": "\u256c", - "boxVL;": "\u2563", - "boxVR;": "\u2560", - "boxVh;": "\u256b", - "boxVl;": "\u2562", - "boxVr;": "\u255f", - "boxbox;": "\u29c9", - "boxdL;": "\u2555", - "boxdR;": "\u2552", - "boxdl;": "\u2510", - "boxdr;": "\u250c", - "boxh;": "\u2500", - "boxhD;": "\u2565", - "boxhU;": "\u2568", - "boxhd;": "\u252c", - "boxhu;": "\u2534", - "boxminus;": "\u229f", - "boxplus;": "\u229e", - "boxtimes;": "\u22a0", - "boxuL;": "\u255b", - "boxuR;": "\u2558", - "boxul;": "\u2518", - "boxur;": "\u2514", - "boxv;": "\u2502", - "boxvH;": "\u256a", - "boxvL;": "\u2561", - "boxvR;": "\u255e", - "boxvh;": "\u253c", - "boxvl;": "\u2524", - "boxvr;": "\u251c", - "bprime;": "\u2035", - "breve;": "\u02d8", - "brvbar": "\xa6", - "brvbar;": "\xa6", - "bscr;": "\U0001d4b7", - "bsemi;": "\u204f", - "bsim;": "\u223d", - "bsime;": "\u22cd", - "bsol;": "\\", - "bsolb;": "\u29c5", - "bsolhsub;": "\u27c8", - "bull;": "\u2022", - "bullet;": "\u2022", - "bump;": "\u224e", - "bumpE;": "\u2aae", - "bumpe;": "\u224f", - "bumpeq;": "\u224f", - "cacute;": "\u0107", - "cap;": "\u2229", - "capand;": "\u2a44", - "capbrcup;": "\u2a49", - "capcap;": "\u2a4b", - "capcup;": "\u2a47", - "capdot;": "\u2a40", - "caps;": "\u2229\ufe00", - "caret;": "\u2041", - "caron;": "\u02c7", - "ccaps;": "\u2a4d", - "ccaron;": "\u010d", - "ccedil": "\xe7", - "ccedil;": "\xe7", - "ccirc;": "\u0109", - "ccups;": "\u2a4c", - "ccupssm;": "\u2a50", - "cdot;": "\u010b", - "cedil": "\xb8", - "cedil;": "\xb8", - "cemptyv;": "\u29b2", - "cent": "\xa2", - "cent;": "\xa2", - "centerdot;": "\xb7", - "cfr;": "\U0001d520", - "chcy;": "\u0447", - "check;": "\u2713", - "checkmark;": "\u2713", - "chi;": "\u03c7", - "cir;": "\u25cb", - "cirE;": "\u29c3", - "circ;": "\u02c6", - "circeq;": "\u2257", - "circlearrowleft;": "\u21ba", - "circlearrowright;": "\u21bb", - "circledR;": "\xae", - "circledS;": "\u24c8", - "circledast;": "\u229b", - "circledcirc;": "\u229a", - "circleddash;": "\u229d", - "cire;": "\u2257", - "cirfnint;": "\u2a10", - "cirmid;": "\u2aef", - "cirscir;": "\u29c2", - "clubs;": "\u2663", - "clubsuit;": "\u2663", - "colon;": ":", - "colone;": "\u2254", - "coloneq;": "\u2254", - "comma;": ",", - "commat;": "@", - "comp;": "\u2201", - "compfn;": "\u2218", - "complement;": "\u2201", - "complexes;": "\u2102", - "cong;": "\u2245", - "congdot;": "\u2a6d", - "conint;": "\u222e", - "copf;": "\U0001d554", - "coprod;": "\u2210", - "copy": "\xa9", - "copy;": "\xa9", - "copysr;": "\u2117", - "crarr;": "\u21b5", - "cross;": "\u2717", - "cscr;": "\U0001d4b8", - "csub;": "\u2acf", - "csube;": "\u2ad1", - "csup;": "\u2ad0", - "csupe;": "\u2ad2", - "ctdot;": "\u22ef", - "cudarrl;": "\u2938", - "cudarrr;": "\u2935", - "cuepr;": "\u22de", - "cuesc;": "\u22df", - "cularr;": "\u21b6", - "cularrp;": "\u293d", - "cup;": "\u222a", - "cupbrcap;": "\u2a48", - "cupcap;": "\u2a46", - "cupcup;": "\u2a4a", - "cupdot;": "\u228d", - "cupor;": "\u2a45", - "cups;": "\u222a\ufe00", - "curarr;": "\u21b7", - "curarrm;": "\u293c", - "curlyeqprec;": "\u22de", - "curlyeqsucc;": "\u22df", - "curlyvee;": "\u22ce", - "curlywedge;": "\u22cf", - "curren": "\xa4", - "curren;": "\xa4", - "curvearrowleft;": "\u21b6", - "curvearrowright;": "\u21b7", - "cuvee;": "\u22ce", - "cuwed;": "\u22cf", - "cwconint;": "\u2232", - "cwint;": "\u2231", - "cylcty;": "\u232d", - "dArr;": "\u21d3", - "dHar;": "\u2965", - "dagger;": 
"\u2020", - "daleth;": "\u2138", - "darr;": "\u2193", - "dash;": "\u2010", - "dashv;": "\u22a3", - "dbkarow;": "\u290f", - "dblac;": "\u02dd", - "dcaron;": "\u010f", - "dcy;": "\u0434", - "dd;": "\u2146", - "ddagger;": "\u2021", - "ddarr;": "\u21ca", - "ddotseq;": "\u2a77", - "deg": "\xb0", - "deg;": "\xb0", - "delta;": "\u03b4", - "demptyv;": "\u29b1", - "dfisht;": "\u297f", - "dfr;": "\U0001d521", - "dharl;": "\u21c3", - "dharr;": "\u21c2", - "diam;": "\u22c4", - "diamond;": "\u22c4", - "diamondsuit;": "\u2666", - "diams;": "\u2666", - "die;": "\xa8", - "digamma;": "\u03dd", - "disin;": "\u22f2", - "div;": "\xf7", - "divide": "\xf7", - "divide;": "\xf7", - "divideontimes;": "\u22c7", - "divonx;": "\u22c7", - "djcy;": "\u0452", - "dlcorn;": "\u231e", - "dlcrop;": "\u230d", - "dollar;": "$", - "dopf;": "\U0001d555", - "dot;": "\u02d9", - "doteq;": "\u2250", - "doteqdot;": "\u2251", - "dotminus;": "\u2238", - "dotplus;": "\u2214", - "dotsquare;": "\u22a1", - "doublebarwedge;": "\u2306", - "downarrow;": "\u2193", - "downdownarrows;": "\u21ca", - "downharpoonleft;": "\u21c3", - "downharpoonright;": "\u21c2", - "drbkarow;": "\u2910", - "drcorn;": "\u231f", - "drcrop;": "\u230c", - "dscr;": "\U0001d4b9", - "dscy;": "\u0455", - "dsol;": "\u29f6", - "dstrok;": "\u0111", - "dtdot;": "\u22f1", - "dtri;": "\u25bf", - "dtrif;": "\u25be", - "duarr;": "\u21f5", - "duhar;": "\u296f", - "dwangle;": "\u29a6", - "dzcy;": "\u045f", - "dzigrarr;": "\u27ff", - "eDDot;": "\u2a77", - "eDot;": "\u2251", - "eacute": "\xe9", - "eacute;": "\xe9", - "easter;": "\u2a6e", - "ecaron;": "\u011b", - "ecir;": "\u2256", - "ecirc": "\xea", - "ecirc;": "\xea", - "ecolon;": "\u2255", - "ecy;": "\u044d", - "edot;": "\u0117", - "ee;": "\u2147", - "efDot;": "\u2252", - "efr;": "\U0001d522", - "eg;": "\u2a9a", - "egrave": "\xe8", - "egrave;": "\xe8", - "egs;": "\u2a96", - "egsdot;": "\u2a98", - "el;": "\u2a99", - "elinters;": "\u23e7", - "ell;": "\u2113", - "els;": "\u2a95", - "elsdot;": "\u2a97", - "emacr;": "\u0113", - "empty;": "\u2205", - "emptyset;": "\u2205", - "emptyv;": "\u2205", - "emsp13;": "\u2004", - "emsp14;": "\u2005", - "emsp;": "\u2003", - "eng;": "\u014b", - "ensp;": "\u2002", - "eogon;": "\u0119", - "eopf;": "\U0001d556", - "epar;": "\u22d5", - "eparsl;": "\u29e3", - "eplus;": "\u2a71", - "epsi;": "\u03b5", - "epsilon;": "\u03b5", - "epsiv;": "\u03f5", - "eqcirc;": "\u2256", - "eqcolon;": "\u2255", - "eqsim;": "\u2242", - "eqslantgtr;": "\u2a96", - "eqslantless;": "\u2a95", - "equals;": "=", - "equest;": "\u225f", - "equiv;": "\u2261", - "equivDD;": "\u2a78", - "eqvparsl;": "\u29e5", - "erDot;": "\u2253", - "erarr;": "\u2971", - "escr;": "\u212f", - "esdot;": "\u2250", - "esim;": "\u2242", - "eta;": "\u03b7", - "eth": "\xf0", - "eth;": "\xf0", - "euml": "\xeb", - "euml;": "\xeb", - "euro;": "\u20ac", - "excl;": "!", - "exist;": "\u2203", - "expectation;": "\u2130", - "exponentiale;": "\u2147", - "fallingdotseq;": "\u2252", - "fcy;": "\u0444", - "female;": "\u2640", - "ffilig;": "\ufb03", - "fflig;": "\ufb00", - "ffllig;": "\ufb04", - "ffr;": "\U0001d523", - "filig;": "\ufb01", - "fjlig;": "fj", - "flat;": "\u266d", - "fllig;": "\ufb02", - "fltns;": "\u25b1", - "fnof;": "\u0192", - "fopf;": "\U0001d557", - "forall;": "\u2200", - "fork;": "\u22d4", - "forkv;": "\u2ad9", - "fpartint;": "\u2a0d", - "frac12": "\xbd", - "frac12;": "\xbd", - "frac13;": "\u2153", - "frac14": "\xbc", - "frac14;": "\xbc", - "frac15;": "\u2155", - "frac16;": "\u2159", - "frac18;": "\u215b", - "frac23;": "\u2154", - "frac25;": "\u2156", - 
"frac34": "\xbe", - "frac34;": "\xbe", - "frac35;": "\u2157", - "frac38;": "\u215c", - "frac45;": "\u2158", - "frac56;": "\u215a", - "frac58;": "\u215d", - "frac78;": "\u215e", - "frasl;": "\u2044", - "frown;": "\u2322", - "fscr;": "\U0001d4bb", - "gE;": "\u2267", - "gEl;": "\u2a8c", - "gacute;": "\u01f5", - "gamma;": "\u03b3", - "gammad;": "\u03dd", - "gap;": "\u2a86", - "gbreve;": "\u011f", - "gcirc;": "\u011d", - "gcy;": "\u0433", - "gdot;": "\u0121", - "ge;": "\u2265", - "gel;": "\u22db", - "geq;": "\u2265", - "geqq;": "\u2267", - "geqslant;": "\u2a7e", - "ges;": "\u2a7e", - "gescc;": "\u2aa9", - "gesdot;": "\u2a80", - "gesdoto;": "\u2a82", - "gesdotol;": "\u2a84", - "gesl;": "\u22db\ufe00", - "gesles;": "\u2a94", - "gfr;": "\U0001d524", - "gg;": "\u226b", - "ggg;": "\u22d9", - "gimel;": "\u2137", - "gjcy;": "\u0453", - "gl;": "\u2277", - "glE;": "\u2a92", - "gla;": "\u2aa5", - "glj;": "\u2aa4", - "gnE;": "\u2269", - "gnap;": "\u2a8a", - "gnapprox;": "\u2a8a", - "gne;": "\u2a88", - "gneq;": "\u2a88", - "gneqq;": "\u2269", - "gnsim;": "\u22e7", - "gopf;": "\U0001d558", - "grave;": "`", - "gscr;": "\u210a", - "gsim;": "\u2273", - "gsime;": "\u2a8e", - "gsiml;": "\u2a90", - "gt": ">", - "gt;": ">", - "gtcc;": "\u2aa7", - "gtcir;": "\u2a7a", - "gtdot;": "\u22d7", - "gtlPar;": "\u2995", - "gtquest;": "\u2a7c", - "gtrapprox;": "\u2a86", - "gtrarr;": "\u2978", - "gtrdot;": "\u22d7", - "gtreqless;": "\u22db", - "gtreqqless;": "\u2a8c", - "gtrless;": "\u2277", - "gtrsim;": "\u2273", - "gvertneqq;": "\u2269\ufe00", - "gvnE;": "\u2269\ufe00", - "hArr;": "\u21d4", - "hairsp;": "\u200a", - "half;": "\xbd", - "hamilt;": "\u210b", - "hardcy;": "\u044a", - "harr;": "\u2194", - "harrcir;": "\u2948", - "harrw;": "\u21ad", - "hbar;": "\u210f", - "hcirc;": "\u0125", - "hearts;": "\u2665", - "heartsuit;": "\u2665", - "hellip;": "\u2026", - "hercon;": "\u22b9", - "hfr;": "\U0001d525", - "hksearow;": "\u2925", - "hkswarow;": "\u2926", - "hoarr;": "\u21ff", - "homtht;": "\u223b", - "hookleftarrow;": "\u21a9", - "hookrightarrow;": "\u21aa", - "hopf;": "\U0001d559", - "horbar;": "\u2015", - "hscr;": "\U0001d4bd", - "hslash;": "\u210f", - "hstrok;": "\u0127", - "hybull;": "\u2043", - "hyphen;": "\u2010", - "iacute": "\xed", - "iacute;": "\xed", - "ic;": "\u2063", - "icirc": "\xee", - "icirc;": "\xee", - "icy;": "\u0438", - "iecy;": "\u0435", - "iexcl": "\xa1", - "iexcl;": "\xa1", - "iff;": "\u21d4", - "ifr;": "\U0001d526", - "igrave": "\xec", - "igrave;": "\xec", - "ii;": "\u2148", - "iiiint;": "\u2a0c", - "iiint;": "\u222d", - "iinfin;": "\u29dc", - "iiota;": "\u2129", - "ijlig;": "\u0133", - "imacr;": "\u012b", - "image;": "\u2111", - "imagline;": "\u2110", - "imagpart;": "\u2111", - "imath;": "\u0131", - "imof;": "\u22b7", - "imped;": "\u01b5", - "in;": "\u2208", - "incare;": "\u2105", - "infin;": "\u221e", - "infintie;": "\u29dd", - "inodot;": "\u0131", - "int;": "\u222b", - "intcal;": "\u22ba", - "integers;": "\u2124", - "intercal;": "\u22ba", - "intlarhk;": "\u2a17", - "intprod;": "\u2a3c", - "iocy;": "\u0451", - "iogon;": "\u012f", - "iopf;": "\U0001d55a", - "iota;": "\u03b9", - "iprod;": "\u2a3c", - "iquest": "\xbf", - "iquest;": "\xbf", - "iscr;": "\U0001d4be", - "isin;": "\u2208", - "isinE;": "\u22f9", - "isindot;": "\u22f5", - "isins;": "\u22f4", - "isinsv;": "\u22f3", - "isinv;": "\u2208", - "it;": "\u2062", - "itilde;": "\u0129", - "iukcy;": "\u0456", - "iuml": "\xef", - "iuml;": "\xef", - "jcirc;": "\u0135", - "jcy;": "\u0439", - "jfr;": "\U0001d527", - "jmath;": "\u0237", - "jopf;": "\U0001d55b", 
- "jscr;": "\U0001d4bf", - "jsercy;": "\u0458", - "jukcy;": "\u0454", - "kappa;": "\u03ba", - "kappav;": "\u03f0", - "kcedil;": "\u0137", - "kcy;": "\u043a", - "kfr;": "\U0001d528", - "kgreen;": "\u0138", - "khcy;": "\u0445", - "kjcy;": "\u045c", - "kopf;": "\U0001d55c", - "kscr;": "\U0001d4c0", - "lAarr;": "\u21da", - "lArr;": "\u21d0", - "lAtail;": "\u291b", - "lBarr;": "\u290e", - "lE;": "\u2266", - "lEg;": "\u2a8b", - "lHar;": "\u2962", - "lacute;": "\u013a", - "laemptyv;": "\u29b4", - "lagran;": "\u2112", - "lambda;": "\u03bb", - "lang;": "\u27e8", - "langd;": "\u2991", - "langle;": "\u27e8", - "lap;": "\u2a85", - "laquo": "\xab", - "laquo;": "\xab", - "larr;": "\u2190", - "larrb;": "\u21e4", - "larrbfs;": "\u291f", - "larrfs;": "\u291d", - "larrhk;": "\u21a9", - "larrlp;": "\u21ab", - "larrpl;": "\u2939", - "larrsim;": "\u2973", - "larrtl;": "\u21a2", - "lat;": "\u2aab", - "latail;": "\u2919", - "late;": "\u2aad", - "lates;": "\u2aad\ufe00", - "lbarr;": "\u290c", - "lbbrk;": "\u2772", - "lbrace;": "{", - "lbrack;": "[", - "lbrke;": "\u298b", - "lbrksld;": "\u298f", - "lbrkslu;": "\u298d", - "lcaron;": "\u013e", - "lcedil;": "\u013c", - "lceil;": "\u2308", - "lcub;": "{", - "lcy;": "\u043b", - "ldca;": "\u2936", - "ldquo;": "\u201c", - "ldquor;": "\u201e", - "ldrdhar;": "\u2967", - "ldrushar;": "\u294b", - "ldsh;": "\u21b2", - "le;": "\u2264", - "leftarrow;": "\u2190", - "leftarrowtail;": "\u21a2", - "leftharpoondown;": "\u21bd", - "leftharpoonup;": "\u21bc", - "leftleftarrows;": "\u21c7", - "leftrightarrow;": "\u2194", - "leftrightarrows;": "\u21c6", - "leftrightharpoons;": "\u21cb", - "leftrightsquigarrow;": "\u21ad", - "leftthreetimes;": "\u22cb", - "leg;": "\u22da", - "leq;": "\u2264", - "leqq;": "\u2266", - "leqslant;": "\u2a7d", - "les;": "\u2a7d", - "lescc;": "\u2aa8", - "lesdot;": "\u2a7f", - "lesdoto;": "\u2a81", - "lesdotor;": "\u2a83", - "lesg;": "\u22da\ufe00", - "lesges;": "\u2a93", - "lessapprox;": "\u2a85", - "lessdot;": "\u22d6", - "lesseqgtr;": "\u22da", - "lesseqqgtr;": "\u2a8b", - "lessgtr;": "\u2276", - "lesssim;": "\u2272", - "lfisht;": "\u297c", - "lfloor;": "\u230a", - "lfr;": "\U0001d529", - "lg;": "\u2276", - "lgE;": "\u2a91", - "lhard;": "\u21bd", - "lharu;": "\u21bc", - "lharul;": "\u296a", - "lhblk;": "\u2584", - "ljcy;": "\u0459", - "ll;": "\u226a", - "llarr;": "\u21c7", - "llcorner;": "\u231e", - "llhard;": "\u296b", - "lltri;": "\u25fa", - "lmidot;": "\u0140", - "lmoust;": "\u23b0", - "lmoustache;": "\u23b0", - "lnE;": "\u2268", - "lnap;": "\u2a89", - "lnapprox;": "\u2a89", - "lne;": "\u2a87", - "lneq;": "\u2a87", - "lneqq;": "\u2268", - "lnsim;": "\u22e6", - "loang;": "\u27ec", - "loarr;": "\u21fd", - "lobrk;": "\u27e6", - "longleftarrow;": "\u27f5", - "longleftrightarrow;": "\u27f7", - "longmapsto;": "\u27fc", - "longrightarrow;": "\u27f6", - "looparrowleft;": "\u21ab", - "looparrowright;": "\u21ac", - "lopar;": "\u2985", - "lopf;": "\U0001d55d", - "loplus;": "\u2a2d", - "lotimes;": "\u2a34", - "lowast;": "\u2217", - "lowbar;": "_", - "loz;": "\u25ca", - "lozenge;": "\u25ca", - "lozf;": "\u29eb", - "lpar;": "(", - "lparlt;": "\u2993", - "lrarr;": "\u21c6", - "lrcorner;": "\u231f", - "lrhar;": "\u21cb", - "lrhard;": "\u296d", - "lrm;": "\u200e", - "lrtri;": "\u22bf", - "lsaquo;": "\u2039", - "lscr;": "\U0001d4c1", - "lsh;": "\u21b0", - "lsim;": "\u2272", - "lsime;": "\u2a8d", - "lsimg;": "\u2a8f", - "lsqb;": "[", - "lsquo;": "\u2018", - "lsquor;": "\u201a", - "lstrok;": "\u0142", - "lt": "<", - "lt;": "<", - "ltcc;": "\u2aa6", - "ltcir;": "\u2a79", - 
"ltdot;": "\u22d6", - "lthree;": "\u22cb", - "ltimes;": "\u22c9", - "ltlarr;": "\u2976", - "ltquest;": "\u2a7b", - "ltrPar;": "\u2996", - "ltri;": "\u25c3", - "ltrie;": "\u22b4", - "ltrif;": "\u25c2", - "lurdshar;": "\u294a", - "luruhar;": "\u2966", - "lvertneqq;": "\u2268\ufe00", - "lvnE;": "\u2268\ufe00", - "mDDot;": "\u223a", - "macr": "\xaf", - "macr;": "\xaf", - "male;": "\u2642", - "malt;": "\u2720", - "maltese;": "\u2720", - "map;": "\u21a6", - "mapsto;": "\u21a6", - "mapstodown;": "\u21a7", - "mapstoleft;": "\u21a4", - "mapstoup;": "\u21a5", - "marker;": "\u25ae", - "mcomma;": "\u2a29", - "mcy;": "\u043c", - "mdash;": "\u2014", - "measuredangle;": "\u2221", - "mfr;": "\U0001d52a", - "mho;": "\u2127", - "micro": "\xb5", - "micro;": "\xb5", - "mid;": "\u2223", - "midast;": "*", - "midcir;": "\u2af0", - "middot": "\xb7", - "middot;": "\xb7", - "minus;": "\u2212", - "minusb;": "\u229f", - "minusd;": "\u2238", - "minusdu;": "\u2a2a", - "mlcp;": "\u2adb", - "mldr;": "\u2026", - "mnplus;": "\u2213", - "models;": "\u22a7", - "mopf;": "\U0001d55e", - "mp;": "\u2213", - "mscr;": "\U0001d4c2", - "mstpos;": "\u223e", - "mu;": "\u03bc", - "multimap;": "\u22b8", - "mumap;": "\u22b8", - "nGg;": "\u22d9\u0338", - "nGt;": "\u226b\u20d2", - "nGtv;": "\u226b\u0338", - "nLeftarrow;": "\u21cd", - "nLeftrightarrow;": "\u21ce", - "nLl;": "\u22d8\u0338", - "nLt;": "\u226a\u20d2", - "nLtv;": "\u226a\u0338", - "nRightarrow;": "\u21cf", - "nVDash;": "\u22af", - "nVdash;": "\u22ae", - "nabla;": "\u2207", - "nacute;": "\u0144", - "nang;": "\u2220\u20d2", - "nap;": "\u2249", - "napE;": "\u2a70\u0338", - "napid;": "\u224b\u0338", - "napos;": "\u0149", - "napprox;": "\u2249", - "natur;": "\u266e", - "natural;": "\u266e", - "naturals;": "\u2115", - "nbsp": "\xa0", - "nbsp;": "\xa0", - "nbump;": "\u224e\u0338", - "nbumpe;": "\u224f\u0338", - "ncap;": "\u2a43", - "ncaron;": "\u0148", - "ncedil;": "\u0146", - "ncong;": "\u2247", - "ncongdot;": "\u2a6d\u0338", - "ncup;": "\u2a42", - "ncy;": "\u043d", - "ndash;": "\u2013", - "ne;": "\u2260", - "neArr;": "\u21d7", - "nearhk;": "\u2924", - "nearr;": "\u2197", - "nearrow;": "\u2197", - "nedot;": "\u2250\u0338", - "nequiv;": "\u2262", - "nesear;": "\u2928", - "nesim;": "\u2242\u0338", - "nexist;": "\u2204", - "nexists;": "\u2204", - "nfr;": "\U0001d52b", - "ngE;": "\u2267\u0338", - "nge;": "\u2271", - "ngeq;": "\u2271", - "ngeqq;": "\u2267\u0338", - "ngeqslant;": "\u2a7e\u0338", - "nges;": "\u2a7e\u0338", - "ngsim;": "\u2275", - "ngt;": "\u226f", - "ngtr;": "\u226f", - "nhArr;": "\u21ce", - "nharr;": "\u21ae", - "nhpar;": "\u2af2", - "ni;": "\u220b", - "nis;": "\u22fc", - "nisd;": "\u22fa", - "niv;": "\u220b", - "njcy;": "\u045a", - "nlArr;": "\u21cd", - "nlE;": "\u2266\u0338", - "nlarr;": "\u219a", - "nldr;": "\u2025", - "nle;": "\u2270", - "nleftarrow;": "\u219a", - "nleftrightarrow;": "\u21ae", - "nleq;": "\u2270", - "nleqq;": "\u2266\u0338", - "nleqslant;": "\u2a7d\u0338", - "nles;": "\u2a7d\u0338", - "nless;": "\u226e", - "nlsim;": "\u2274", - "nlt;": "\u226e", - "nltri;": "\u22ea", - "nltrie;": "\u22ec", - "nmid;": "\u2224", - "nopf;": "\U0001d55f", - "not": "\xac", - "not;": "\xac", - "notin;": "\u2209", - "notinE;": "\u22f9\u0338", - "notindot;": "\u22f5\u0338", - "notinva;": "\u2209", - "notinvb;": "\u22f7", - "notinvc;": "\u22f6", - "notni;": "\u220c", - "notniva;": "\u220c", - "notnivb;": "\u22fe", - "notnivc;": "\u22fd", - "npar;": "\u2226", - "nparallel;": "\u2226", - "nparsl;": "\u2afd\u20e5", - "npart;": "\u2202\u0338", - "npolint;": "\u2a14", - "npr;": 
"\u2280", - "nprcue;": "\u22e0", - "npre;": "\u2aaf\u0338", - "nprec;": "\u2280", - "npreceq;": "\u2aaf\u0338", - "nrArr;": "\u21cf", - "nrarr;": "\u219b", - "nrarrc;": "\u2933\u0338", - "nrarrw;": "\u219d\u0338", - "nrightarrow;": "\u219b", - "nrtri;": "\u22eb", - "nrtrie;": "\u22ed", - "nsc;": "\u2281", - "nsccue;": "\u22e1", - "nsce;": "\u2ab0\u0338", - "nscr;": "\U0001d4c3", - "nshortmid;": "\u2224", - "nshortparallel;": "\u2226", - "nsim;": "\u2241", - "nsime;": "\u2244", - "nsimeq;": "\u2244", - "nsmid;": "\u2224", - "nspar;": "\u2226", - "nsqsube;": "\u22e2", - "nsqsupe;": "\u22e3", - "nsub;": "\u2284", - "nsubE;": "\u2ac5\u0338", - "nsube;": "\u2288", - "nsubset;": "\u2282\u20d2", - "nsubseteq;": "\u2288", - "nsubseteqq;": "\u2ac5\u0338", - "nsucc;": "\u2281", - "nsucceq;": "\u2ab0\u0338", - "nsup;": "\u2285", - "nsupE;": "\u2ac6\u0338", - "nsupe;": "\u2289", - "nsupset;": "\u2283\u20d2", - "nsupseteq;": "\u2289", - "nsupseteqq;": "\u2ac6\u0338", - "ntgl;": "\u2279", - "ntilde": "\xf1", - "ntilde;": "\xf1", - "ntlg;": "\u2278", - "ntriangleleft;": "\u22ea", - "ntrianglelefteq;": "\u22ec", - "ntriangleright;": "\u22eb", - "ntrianglerighteq;": "\u22ed", - "nu;": "\u03bd", - "num;": "#", - "numero;": "\u2116", - "numsp;": "\u2007", - "nvDash;": "\u22ad", - "nvHarr;": "\u2904", - "nvap;": "\u224d\u20d2", - "nvdash;": "\u22ac", - "nvge;": "\u2265\u20d2", - "nvgt;": ">\u20d2", - "nvinfin;": "\u29de", - "nvlArr;": "\u2902", - "nvle;": "\u2264\u20d2", - "nvlt;": "<\u20d2", - "nvltrie;": "\u22b4\u20d2", - "nvrArr;": "\u2903", - "nvrtrie;": "\u22b5\u20d2", - "nvsim;": "\u223c\u20d2", - "nwArr;": "\u21d6", - "nwarhk;": "\u2923", - "nwarr;": "\u2196", - "nwarrow;": "\u2196", - "nwnear;": "\u2927", - "oS;": "\u24c8", - "oacute": "\xf3", - "oacute;": "\xf3", - "oast;": "\u229b", - "ocir;": "\u229a", - "ocirc": "\xf4", - "ocirc;": "\xf4", - "ocy;": "\u043e", - "odash;": "\u229d", - "odblac;": "\u0151", - "odiv;": "\u2a38", - "odot;": "\u2299", - "odsold;": "\u29bc", - "oelig;": "\u0153", - "ofcir;": "\u29bf", - "ofr;": "\U0001d52c", - "ogon;": "\u02db", - "ograve": "\xf2", - "ograve;": "\xf2", - "ogt;": "\u29c1", - "ohbar;": "\u29b5", - "ohm;": "\u03a9", - "oint;": "\u222e", - "olarr;": "\u21ba", - "olcir;": "\u29be", - "olcross;": "\u29bb", - "oline;": "\u203e", - "olt;": "\u29c0", - "omacr;": "\u014d", - "omega;": "\u03c9", - "omicron;": "\u03bf", - "omid;": "\u29b6", - "ominus;": "\u2296", - "oopf;": "\U0001d560", - "opar;": "\u29b7", - "operp;": "\u29b9", - "oplus;": "\u2295", - "or;": "\u2228", - "orarr;": "\u21bb", - "ord;": "\u2a5d", - "order;": "\u2134", - "orderof;": "\u2134", - "ordf": "\xaa", - "ordf;": "\xaa", - "ordm": "\xba", - "ordm;": "\xba", - "origof;": "\u22b6", - "oror;": "\u2a56", - "orslope;": "\u2a57", - "orv;": "\u2a5b", - "oscr;": "\u2134", - "oslash": "\xf8", - "oslash;": "\xf8", - "osol;": "\u2298", - "otilde": "\xf5", - "otilde;": "\xf5", - "otimes;": "\u2297", - "otimesas;": "\u2a36", - "ouml": "\xf6", - "ouml;": "\xf6", - "ovbar;": "\u233d", - "par;": "\u2225", - "para": "\xb6", - "para;": "\xb6", - "parallel;": "\u2225", - "parsim;": "\u2af3", - "parsl;": "\u2afd", - "part;": "\u2202", - "pcy;": "\u043f", - "percnt;": "%", - "period;": ".", - "permil;": "\u2030", - "perp;": "\u22a5", - "pertenk;": "\u2031", - "pfr;": "\U0001d52d", - "phi;": "\u03c6", - "phiv;": "\u03d5", - "phmmat;": "\u2133", - "phone;": "\u260e", - "pi;": "\u03c0", - "pitchfork;": "\u22d4", - "piv;": "\u03d6", - "planck;": "\u210f", - "planckh;": "\u210e", - "plankv;": "\u210f", - "plus;": "+", - 
"plusacir;": "\u2a23", - "plusb;": "\u229e", - "pluscir;": "\u2a22", - "plusdo;": "\u2214", - "plusdu;": "\u2a25", - "pluse;": "\u2a72", - "plusmn": "\xb1", - "plusmn;": "\xb1", - "plussim;": "\u2a26", - "plustwo;": "\u2a27", - "pm;": "\xb1", - "pointint;": "\u2a15", - "popf;": "\U0001d561", - "pound": "\xa3", - "pound;": "\xa3", - "pr;": "\u227a", - "prE;": "\u2ab3", - "prap;": "\u2ab7", - "prcue;": "\u227c", - "pre;": "\u2aaf", - "prec;": "\u227a", - "precapprox;": "\u2ab7", - "preccurlyeq;": "\u227c", - "preceq;": "\u2aaf", - "precnapprox;": "\u2ab9", - "precneqq;": "\u2ab5", - "precnsim;": "\u22e8", - "precsim;": "\u227e", - "prime;": "\u2032", - "primes;": "\u2119", - "prnE;": "\u2ab5", - "prnap;": "\u2ab9", - "prnsim;": "\u22e8", - "prod;": "\u220f", - "profalar;": "\u232e", - "profline;": "\u2312", - "profsurf;": "\u2313", - "prop;": "\u221d", - "propto;": "\u221d", - "prsim;": "\u227e", - "prurel;": "\u22b0", - "pscr;": "\U0001d4c5", - "psi;": "\u03c8", - "puncsp;": "\u2008", - "qfr;": "\U0001d52e", - "qint;": "\u2a0c", - "qopf;": "\U0001d562", - "qprime;": "\u2057", - "qscr;": "\U0001d4c6", - "quaternions;": "\u210d", - "quatint;": "\u2a16", - "quest;": "?", - "questeq;": "\u225f", - "quot": "\"", - "quot;": "\"", - "rAarr;": "\u21db", - "rArr;": "\u21d2", - "rAtail;": "\u291c", - "rBarr;": "\u290f", - "rHar;": "\u2964", - "race;": "\u223d\u0331", - "racute;": "\u0155", - "radic;": "\u221a", - "raemptyv;": "\u29b3", - "rang;": "\u27e9", - "rangd;": "\u2992", - "range;": "\u29a5", - "rangle;": "\u27e9", - "raquo": "\xbb", - "raquo;": "\xbb", - "rarr;": "\u2192", - "rarrap;": "\u2975", - "rarrb;": "\u21e5", - "rarrbfs;": "\u2920", - "rarrc;": "\u2933", - "rarrfs;": "\u291e", - "rarrhk;": "\u21aa", - "rarrlp;": "\u21ac", - "rarrpl;": "\u2945", - "rarrsim;": "\u2974", - "rarrtl;": "\u21a3", - "rarrw;": "\u219d", - "ratail;": "\u291a", - "ratio;": "\u2236", - "rationals;": "\u211a", - "rbarr;": "\u290d", - "rbbrk;": "\u2773", - "rbrace;": "}", - "rbrack;": "]", - "rbrke;": "\u298c", - "rbrksld;": "\u298e", - "rbrkslu;": "\u2990", - "rcaron;": "\u0159", - "rcedil;": "\u0157", - "rceil;": "\u2309", - "rcub;": "}", - "rcy;": "\u0440", - "rdca;": "\u2937", - "rdldhar;": "\u2969", - "rdquo;": "\u201d", - "rdquor;": "\u201d", - "rdsh;": "\u21b3", - "real;": "\u211c", - "realine;": "\u211b", - "realpart;": "\u211c", - "reals;": "\u211d", - "rect;": "\u25ad", - "reg": "\xae", - "reg;": "\xae", - "rfisht;": "\u297d", - "rfloor;": "\u230b", - "rfr;": "\U0001d52f", - "rhard;": "\u21c1", - "rharu;": "\u21c0", - "rharul;": "\u296c", - "rho;": "\u03c1", - "rhov;": "\u03f1", - "rightarrow;": "\u2192", - "rightarrowtail;": "\u21a3", - "rightharpoondown;": "\u21c1", - "rightharpoonup;": "\u21c0", - "rightleftarrows;": "\u21c4", - "rightleftharpoons;": "\u21cc", - "rightrightarrows;": "\u21c9", - "rightsquigarrow;": "\u219d", - "rightthreetimes;": "\u22cc", - "ring;": "\u02da", - "risingdotseq;": "\u2253", - "rlarr;": "\u21c4", - "rlhar;": "\u21cc", - "rlm;": "\u200f", - "rmoust;": "\u23b1", - "rmoustache;": "\u23b1", - "rnmid;": "\u2aee", - "roang;": "\u27ed", - "roarr;": "\u21fe", - "robrk;": "\u27e7", - "ropar;": "\u2986", - "ropf;": "\U0001d563", - "roplus;": "\u2a2e", - "rotimes;": "\u2a35", - "rpar;": ")", - "rpargt;": "\u2994", - "rppolint;": "\u2a12", - "rrarr;": "\u21c9", - "rsaquo;": "\u203a", - "rscr;": "\U0001d4c7", - "rsh;": "\u21b1", - "rsqb;": "]", - "rsquo;": "\u2019", - "rsquor;": "\u2019", - "rthree;": "\u22cc", - "rtimes;": "\u22ca", - "rtri;": "\u25b9", - "rtrie;": "\u22b5", - 
"rtrif;": "\u25b8", - "rtriltri;": "\u29ce", - "ruluhar;": "\u2968", - "rx;": "\u211e", - "sacute;": "\u015b", - "sbquo;": "\u201a", - "sc;": "\u227b", - "scE;": "\u2ab4", - "scap;": "\u2ab8", - "scaron;": "\u0161", - "sccue;": "\u227d", - "sce;": "\u2ab0", - "scedil;": "\u015f", - "scirc;": "\u015d", - "scnE;": "\u2ab6", - "scnap;": "\u2aba", - "scnsim;": "\u22e9", - "scpolint;": "\u2a13", - "scsim;": "\u227f", - "scy;": "\u0441", - "sdot;": "\u22c5", - "sdotb;": "\u22a1", - "sdote;": "\u2a66", - "seArr;": "\u21d8", - "searhk;": "\u2925", - "searr;": "\u2198", - "searrow;": "\u2198", - "sect": "\xa7", - "sect;": "\xa7", - "semi;": ";", - "seswar;": "\u2929", - "setminus;": "\u2216", - "setmn;": "\u2216", - "sext;": "\u2736", - "sfr;": "\U0001d530", - "sfrown;": "\u2322", - "sharp;": "\u266f", - "shchcy;": "\u0449", - "shcy;": "\u0448", - "shortmid;": "\u2223", - "shortparallel;": "\u2225", - "shy": "\xad", - "shy;": "\xad", - "sigma;": "\u03c3", - "sigmaf;": "\u03c2", - "sigmav;": "\u03c2", - "sim;": "\u223c", - "simdot;": "\u2a6a", - "sime;": "\u2243", - "simeq;": "\u2243", - "simg;": "\u2a9e", - "simgE;": "\u2aa0", - "siml;": "\u2a9d", - "simlE;": "\u2a9f", - "simne;": "\u2246", - "simplus;": "\u2a24", - "simrarr;": "\u2972", - "slarr;": "\u2190", - "smallsetminus;": "\u2216", - "smashp;": "\u2a33", - "smeparsl;": "\u29e4", - "smid;": "\u2223", - "smile;": "\u2323", - "smt;": "\u2aaa", - "smte;": "\u2aac", - "smtes;": "\u2aac\ufe00", - "softcy;": "\u044c", - "sol;": "/", - "solb;": "\u29c4", - "solbar;": "\u233f", - "sopf;": "\U0001d564", - "spades;": "\u2660", - "spadesuit;": "\u2660", - "spar;": "\u2225", - "sqcap;": "\u2293", - "sqcaps;": "\u2293\ufe00", - "sqcup;": "\u2294", - "sqcups;": "\u2294\ufe00", - "sqsub;": "\u228f", - "sqsube;": "\u2291", - "sqsubset;": "\u228f", - "sqsubseteq;": "\u2291", - "sqsup;": "\u2290", - "sqsupe;": "\u2292", - "sqsupset;": "\u2290", - "sqsupseteq;": "\u2292", - "squ;": "\u25a1", - "square;": "\u25a1", - "squarf;": "\u25aa", - "squf;": "\u25aa", - "srarr;": "\u2192", - "sscr;": "\U0001d4c8", - "ssetmn;": "\u2216", - "ssmile;": "\u2323", - "sstarf;": "\u22c6", - "star;": "\u2606", - "starf;": "\u2605", - "straightepsilon;": "\u03f5", - "straightphi;": "\u03d5", - "strns;": "\xaf", - "sub;": "\u2282", - "subE;": "\u2ac5", - "subdot;": "\u2abd", - "sube;": "\u2286", - "subedot;": "\u2ac3", - "submult;": "\u2ac1", - "subnE;": "\u2acb", - "subne;": "\u228a", - "subplus;": "\u2abf", - "subrarr;": "\u2979", - "subset;": "\u2282", - "subseteq;": "\u2286", - "subseteqq;": "\u2ac5", - "subsetneq;": "\u228a", - "subsetneqq;": "\u2acb", - "subsim;": "\u2ac7", - "subsub;": "\u2ad5", - "subsup;": "\u2ad3", - "succ;": "\u227b", - "succapprox;": "\u2ab8", - "succcurlyeq;": "\u227d", - "succeq;": "\u2ab0", - "succnapprox;": "\u2aba", - "succneqq;": "\u2ab6", - "succnsim;": "\u22e9", - "succsim;": "\u227f", - "sum;": "\u2211", - "sung;": "\u266a", - "sup1": "\xb9", - "sup1;": "\xb9", - "sup2": "\xb2", - "sup2;": "\xb2", - "sup3": "\xb3", - "sup3;": "\xb3", - "sup;": "\u2283", - "supE;": "\u2ac6", - "supdot;": "\u2abe", - "supdsub;": "\u2ad8", - "supe;": "\u2287", - "supedot;": "\u2ac4", - "suphsol;": "\u27c9", - "suphsub;": "\u2ad7", - "suplarr;": "\u297b", - "supmult;": "\u2ac2", - "supnE;": "\u2acc", - "supne;": "\u228b", - "supplus;": "\u2ac0", - "supset;": "\u2283", - "supseteq;": "\u2287", - "supseteqq;": "\u2ac6", - "supsetneq;": "\u228b", - "supsetneqq;": "\u2acc", - "supsim;": "\u2ac8", - "supsub;": "\u2ad4", - "supsup;": "\u2ad6", - "swArr;": "\u21d9", - 
"swarhk;": "\u2926", - "swarr;": "\u2199", - "swarrow;": "\u2199", - "swnwar;": "\u292a", - "szlig": "\xdf", - "szlig;": "\xdf", - "target;": "\u2316", - "tau;": "\u03c4", - "tbrk;": "\u23b4", - "tcaron;": "\u0165", - "tcedil;": "\u0163", - "tcy;": "\u0442", - "tdot;": "\u20db", - "telrec;": "\u2315", - "tfr;": "\U0001d531", - "there4;": "\u2234", - "therefore;": "\u2234", - "theta;": "\u03b8", - "thetasym;": "\u03d1", - "thetav;": "\u03d1", - "thickapprox;": "\u2248", - "thicksim;": "\u223c", - "thinsp;": "\u2009", - "thkap;": "\u2248", - "thksim;": "\u223c", - "thorn": "\xfe", - "thorn;": "\xfe", - "tilde;": "\u02dc", - "times": "\xd7", - "times;": "\xd7", - "timesb;": "\u22a0", - "timesbar;": "\u2a31", - "timesd;": "\u2a30", - "tint;": "\u222d", - "toea;": "\u2928", - "top;": "\u22a4", - "topbot;": "\u2336", - "topcir;": "\u2af1", - "topf;": "\U0001d565", - "topfork;": "\u2ada", - "tosa;": "\u2929", - "tprime;": "\u2034", - "trade;": "\u2122", - "triangle;": "\u25b5", - "triangledown;": "\u25bf", - "triangleleft;": "\u25c3", - "trianglelefteq;": "\u22b4", - "triangleq;": "\u225c", - "triangleright;": "\u25b9", - "trianglerighteq;": "\u22b5", - "tridot;": "\u25ec", - "trie;": "\u225c", - "triminus;": "\u2a3a", - "triplus;": "\u2a39", - "trisb;": "\u29cd", - "tritime;": "\u2a3b", - "trpezium;": "\u23e2", - "tscr;": "\U0001d4c9", - "tscy;": "\u0446", - "tshcy;": "\u045b", - "tstrok;": "\u0167", - "twixt;": "\u226c", - "twoheadleftarrow;": "\u219e", - "twoheadrightarrow;": "\u21a0", - "uArr;": "\u21d1", - "uHar;": "\u2963", - "uacute": "\xfa", - "uacute;": "\xfa", - "uarr;": "\u2191", - "ubrcy;": "\u045e", - "ubreve;": "\u016d", - "ucirc": "\xfb", - "ucirc;": "\xfb", - "ucy;": "\u0443", - "udarr;": "\u21c5", - "udblac;": "\u0171", - "udhar;": "\u296e", - "ufisht;": "\u297e", - "ufr;": "\U0001d532", - "ugrave": "\xf9", - "ugrave;": "\xf9", - "uharl;": "\u21bf", - "uharr;": "\u21be", - "uhblk;": "\u2580", - "ulcorn;": "\u231c", - "ulcorner;": "\u231c", - "ulcrop;": "\u230f", - "ultri;": "\u25f8", - "umacr;": "\u016b", - "uml": "\xa8", - "uml;": "\xa8", - "uogon;": "\u0173", - "uopf;": "\U0001d566", - "uparrow;": "\u2191", - "updownarrow;": "\u2195", - "upharpoonleft;": "\u21bf", - "upharpoonright;": "\u21be", - "uplus;": "\u228e", - "upsi;": "\u03c5", - "upsih;": "\u03d2", - "upsilon;": "\u03c5", - "upuparrows;": "\u21c8", - "urcorn;": "\u231d", - "urcorner;": "\u231d", - "urcrop;": "\u230e", - "uring;": "\u016f", - "urtri;": "\u25f9", - "uscr;": "\U0001d4ca", - "utdot;": "\u22f0", - "utilde;": "\u0169", - "utri;": "\u25b5", - "utrif;": "\u25b4", - "uuarr;": "\u21c8", - "uuml": "\xfc", - "uuml;": "\xfc", - "uwangle;": "\u29a7", - "vArr;": "\u21d5", - "vBar;": "\u2ae8", - "vBarv;": "\u2ae9", - "vDash;": "\u22a8", - "vangrt;": "\u299c", - "varepsilon;": "\u03f5", - "varkappa;": "\u03f0", - "varnothing;": "\u2205", - "varphi;": "\u03d5", - "varpi;": "\u03d6", - "varpropto;": "\u221d", - "varr;": "\u2195", - "varrho;": "\u03f1", - "varsigma;": "\u03c2", - "varsubsetneq;": "\u228a\ufe00", - "varsubsetneqq;": "\u2acb\ufe00", - "varsupsetneq;": "\u228b\ufe00", - "varsupsetneqq;": "\u2acc\ufe00", - "vartheta;": "\u03d1", - "vartriangleleft;": "\u22b2", - "vartriangleright;": "\u22b3", - "vcy;": "\u0432", - "vdash;": "\u22a2", - "vee;": "\u2228", - "veebar;": "\u22bb", - "veeeq;": "\u225a", - "vellip;": "\u22ee", - "verbar;": "|", - "vert;": "|", - "vfr;": "\U0001d533", - "vltri;": "\u22b2", - "vnsub;": "\u2282\u20d2", - "vnsup;": "\u2283\u20d2", - "vopf;": "\U0001d567", - "vprop;": "\u221d", - 
"vrtri;": "\u22b3", - "vscr;": "\U0001d4cb", - "vsubnE;": "\u2acb\ufe00", - "vsubne;": "\u228a\ufe00", - "vsupnE;": "\u2acc\ufe00", - "vsupne;": "\u228b\ufe00", - "vzigzag;": "\u299a", - "wcirc;": "\u0175", - "wedbar;": "\u2a5f", - "wedge;": "\u2227", - "wedgeq;": "\u2259", - "weierp;": "\u2118", - "wfr;": "\U0001d534", - "wopf;": "\U0001d568", - "wp;": "\u2118", - "wr;": "\u2240", - "wreath;": "\u2240", - "wscr;": "\U0001d4cc", - "xcap;": "\u22c2", - "xcirc;": "\u25ef", - "xcup;": "\u22c3", - "xdtri;": "\u25bd", - "xfr;": "\U0001d535", - "xhArr;": "\u27fa", - "xharr;": "\u27f7", - "xi;": "\u03be", - "xlArr;": "\u27f8", - "xlarr;": "\u27f5", - "xmap;": "\u27fc", - "xnis;": "\u22fb", - "xodot;": "\u2a00", - "xopf;": "\U0001d569", - "xoplus;": "\u2a01", - "xotime;": "\u2a02", - "xrArr;": "\u27f9", - "xrarr;": "\u27f6", - "xscr;": "\U0001d4cd", - "xsqcup;": "\u2a06", - "xuplus;": "\u2a04", - "xutri;": "\u25b3", - "xvee;": "\u22c1", - "xwedge;": "\u22c0", - "yacute": "\xfd", - "yacute;": "\xfd", - "yacy;": "\u044f", - "ycirc;": "\u0177", - "ycy;": "\u044b", - "yen": "\xa5", - "yen;": "\xa5", - "yfr;": "\U0001d536", - "yicy;": "\u0457", - "yopf;": "\U0001d56a", - "yscr;": "\U0001d4ce", - "yucy;": "\u044e", - "yuml": "\xff", - "yuml;": "\xff", - "zacute;": "\u017a", - "zcaron;": "\u017e", - "zcy;": "\u0437", - "zdot;": "\u017c", - "zeetrf;": "\u2128", - "zeta;": "\u03b6", - "zfr;": "\U0001d537", - "zhcy;": "\u0436", - "zigrarr;": "\u21dd", - "zopf;": "\U0001d56b", - "zscr;": "\U0001d4cf", - "zwj;": "\u200d", - "zwnj;": "\u200c", -} - -replacementCharacters = { - 0x0: "\uFFFD", - 0x0d: "\u000D", - 0x80: "\u20AC", - 0x81: "\u0081", - 0x82: "\u201A", - 0x83: "\u0192", - 0x84: "\u201E", - 0x85: "\u2026", - 0x86: "\u2020", - 0x87: "\u2021", - 0x88: "\u02C6", - 0x89: "\u2030", - 0x8A: "\u0160", - 0x8B: "\u2039", - 0x8C: "\u0152", - 0x8D: "\u008D", - 0x8E: "\u017D", - 0x8F: "\u008F", - 0x90: "\u0090", - 0x91: "\u2018", - 0x92: "\u2019", - 0x93: "\u201C", - 0x94: "\u201D", - 0x95: "\u2022", - 0x96: "\u2013", - 0x97: "\u2014", - 0x98: "\u02DC", - 0x99: "\u2122", - 0x9A: "\u0161", - 0x9B: "\u203A", - 0x9C: "\u0153", - 0x9D: "\u009D", - 0x9E: "\u017E", - 0x9F: "\u0178", -} - -tokenTypes = { - "Doctype": 0, - "Characters": 1, - "SpaceCharacters": 2, - "StartTag": 3, - "EndTag": 4, - "EmptyTag": 5, - "Comment": 6, - "ParseError": 7 -} - -tagTokenTypes = frozenset([tokenTypes["StartTag"], tokenTypes["EndTag"], - tokenTypes["EmptyTag"]]) - - -prefixes = {v: k for k, v in namespaces.items()} -prefixes["http://www.w3.org/1998/Math/MathML"] = "math" - - -class DataLossWarning(UserWarning): - """Raised when the current tree is unable to represent the input data""" - pass - - -class _ReparseException(Exception): - pass diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/filters/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/html5lib/filters/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 718e2e70bf2ce8b5ef9d1e61de9428309aebca01..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/html5lib/filters/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/filters/__pycache__/alphabeticalattributes.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/html5lib/filters/__pycache__/alphabeticalattributes.cpython-39.pyc deleted file mode 100644 index c2b49c9b2310db18662b205f1b6983c0b05a1c48..0000000000000000000000000000000000000000 Binary files 
a/env/Lib/site-packages/pip/_vendor/html5lib/filters/__pycache__/alphabeticalattributes.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/filters/__pycache__/base.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/html5lib/filters/__pycache__/base.cpython-39.pyc deleted file mode 100644 index 237ff0f2f9385e15be45cdab5c341becd497d6ac..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/html5lib/filters/__pycache__/base.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/filters/__pycache__/inject_meta_charset.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/html5lib/filters/__pycache__/inject_meta_charset.cpython-39.pyc deleted file mode 100644 index cdbe4c7f8665fa6279cfcbffcbe17c0d1a25e0b8..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/html5lib/filters/__pycache__/inject_meta_charset.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/filters/__pycache__/lint.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/html5lib/filters/__pycache__/lint.cpython-39.pyc deleted file mode 100644 index 746c84a71069f3a21ac8990b00eb8980f522a672..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/html5lib/filters/__pycache__/lint.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/filters/__pycache__/optionaltags.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/html5lib/filters/__pycache__/optionaltags.cpython-39.pyc deleted file mode 100644 index 97f1209672b7a0a158cbf078c5fbe40d3f56c5f0..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/html5lib/filters/__pycache__/optionaltags.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/filters/__pycache__/sanitizer.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/html5lib/filters/__pycache__/sanitizer.cpython-39.pyc deleted file mode 100644 index 022692f3c500ebfdf162965f9026031a50e7d8be..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/html5lib/filters/__pycache__/sanitizer.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/filters/__pycache__/whitespace.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/html5lib/filters/__pycache__/whitespace.cpython-39.pyc deleted file mode 100644 index 9758153889e21c84531717e3fdf67b592ef860de..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/html5lib/filters/__pycache__/whitespace.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/filters/alphabeticalattributes.py b/env/Lib/site-packages/pip/_vendor/html5lib/filters/alphabeticalattributes.py deleted file mode 100644 index 5ba926e3b09a71121d3c10b962c26137221d5a34..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/html5lib/filters/alphabeticalattributes.py +++ /dev/null @@ -1,29 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -from . import base - -from collections import OrderedDict - - -def _attr_key(attr): - """Return an appropriate key for an attribute for sorting - - Attributes have a namespace that can be either ``None`` or a string. We - can't compare the two because they're different types, so we convert - ``None`` to an empty string first. 
- - """ - return (attr[0][0] or ''), attr[0][1] - - -class Filter(base.Filter): - """Alphabetizes attributes for elements""" - def __iter__(self): - for token in base.Filter.__iter__(self): - if token["type"] in ("StartTag", "EmptyTag"): - attrs = OrderedDict() - for name, value in sorted(token["data"].items(), - key=_attr_key): - attrs[name] = value - token["data"] = attrs - yield token diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/filters/base.py b/env/Lib/site-packages/pip/_vendor/html5lib/filters/base.py deleted file mode 100644 index c7dbaed0fab5afb4f8947b24dd175d2239d35d6b..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/html5lib/filters/base.py +++ /dev/null @@ -1,12 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - - -class Filter(object): - def __init__(self, source): - self.source = source - - def __iter__(self): - return iter(self.source) - - def __getattr__(self, name): - return getattr(self.source, name) diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/filters/inject_meta_charset.py b/env/Lib/site-packages/pip/_vendor/html5lib/filters/inject_meta_charset.py deleted file mode 100644 index aefb5c842c2f55075546065cfba3c66d137e8184..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/html5lib/filters/inject_meta_charset.py +++ /dev/null @@ -1,73 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -from . import base - - -class Filter(base.Filter): - """Injects ``<meta charset=ENCODING>`` tag into head of document""" - def __init__(self, source, encoding): - """Creates a Filter - - :arg source: the source token stream - - :arg encoding: the encoding to set - - """ - base.Filter.__init__(self, source) - self.encoding = encoding - - def __iter__(self): - state = "pre_head" - meta_found = (self.encoding is None) - pending = [] - - for token in base.Filter.__iter__(self): - type = token["type"] - if type == "StartTag": - if token["name"].lower() == "head": - state = "in_head" - - elif type == "EmptyTag": - if token["name"].lower() == "meta": - # replace charset with actual encoding - has_http_equiv_content_type = False - for (namespace, name), value in token["data"].items(): - if namespace is not None: - continue - elif name.lower() == 'charset': - token["data"][(namespace, name)] = self.encoding - meta_found = True - break - elif name == 'http-equiv' and value.lower() == 'content-type': - has_http_equiv_content_type = True - else: - if has_http_equiv_content_type and (None, "content") in token["data"]: - token["data"][(None, "content")] = 'text/html; charset=%s' % self.encoding - meta_found = True - - elif token["name"].lower() == "head" and not meta_found: - # insert meta into empty head - yield {"type": "StartTag", "name": "head", - "data": token["data"]} - yield {"type": "EmptyTag", "name": "meta", - "data": {(None, "charset"): self.encoding}} - yield {"type": "EndTag", "name": "head"} - meta_found = True - continue - - elif type == "EndTag": - if token["name"].lower() == "head" and pending: - # insert meta into head (if necessary) and flush pending queue - yield pending.pop(0) - if not meta_found: - yield {"type": "EmptyTag", "name": "meta", - "data": {(None, "charset"): self.encoding}} - while pending: - yield pending.pop(0) - meta_found = True - state = "post_head" - - if state == "in_head": - pending.append(token) - else: - yield token diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/filters/lint.py 
b/env/Lib/site-packages/pip/_vendor/html5lib/filters/lint.py deleted file mode 100644 index fcc07eec5b2e5ea926fa8b2af199e14c9cac50dd..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/html5lib/filters/lint.py +++ /dev/null @@ -1,93 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -from pip._vendor.six import text_type - -from . import base -from ..constants import namespaces, voidElements - -from ..constants import spaceCharacters -spaceCharacters = "".join(spaceCharacters) - - -class Filter(base.Filter): - """Lints the token stream for errors - - If it finds any errors, it'll raise an ``AssertionError``. - - """ - def __init__(self, source, require_matching_tags=True): - """Creates a Filter - - :arg source: the source token stream - - :arg require_matching_tags: whether or not to require matching tags - - """ - super(Filter, self).__init__(source) - self.require_matching_tags = require_matching_tags - - def __iter__(self): - open_elements = [] - for token in base.Filter.__iter__(self): - type = token["type"] - if type in ("StartTag", "EmptyTag"): - namespace = token["namespace"] - name = token["name"] - assert namespace is None or isinstance(namespace, text_type) - assert namespace != "" - assert isinstance(name, text_type) - assert name != "" - assert isinstance(token["data"], dict) - if (not namespace or namespace == namespaces["html"]) and name in voidElements: - assert type == "EmptyTag" - else: - assert type == "StartTag" - if type == "StartTag" and self.require_matching_tags: - open_elements.append((namespace, name)) - for (namespace, name), value in token["data"].items(): - assert namespace is None or isinstance(namespace, text_type) - assert namespace != "" - assert isinstance(name, text_type) - assert name != "" - assert isinstance(value, text_type) - - elif type == "EndTag": - namespace = token["namespace"] - name = token["name"] - assert namespace is None or isinstance(namespace, text_type) - assert namespace != "" - assert isinstance(name, text_type) - assert name != "" - if (not namespace or namespace == namespaces["html"]) and name in voidElements: - assert False, "Void element reported as EndTag token: %(tag)s" % {"tag": name} - elif self.require_matching_tags: - start = open_elements.pop() - assert start == (namespace, name) - - elif type == "Comment": - data = token["data"] - assert isinstance(data, text_type) - - elif type in ("Characters", "SpaceCharacters"): - data = token["data"] - assert isinstance(data, text_type) - assert data != "" - if type == "SpaceCharacters": - assert data.strip(spaceCharacters) == "" - - elif type == "Doctype": - name = token["name"] - assert name is None or isinstance(name, text_type) - assert token["publicId"] is None or isinstance(name, text_type) - assert token["systemId"] is None or isinstance(name, text_type) - - elif type == "Entity": - assert isinstance(token["name"], text_type) - - elif type == "SerializerError": - assert isinstance(token["data"], text_type) - - else: - assert False, "Unknown token type: %(type)s" % {"type": type} - - yield token diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/filters/optionaltags.py b/env/Lib/site-packages/pip/_vendor/html5lib/filters/optionaltags.py deleted file mode 100644 index 4a865012c16237c3c4dd3404af7ed767b98d235a..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/html5lib/filters/optionaltags.py +++ /dev/null @@ -1,207 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals 
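The filters deleted above (alphabeticalattributes, base, inject_meta_charset, and the lint filter just shown) all share one shape: each wraps a source token stream and re-yields tokens, so they compose by nesting. A minimal sketch of that composition through html5lib's public API follows; the "etree" tree-walker name and the sample markup are illustrative choices, not anything this diff fixes.

import html5lib
from html5lib import getTreeWalker, serializer
from html5lib.filters import alphabeticalattributes, lint

# Parse, then walk the tree back out as a stream of token dicts.
dom = html5lib.parse('<p title="x" class="y">hi</p>')
tokens = getTreeWalker("etree")(dom)

# Each Filter wraps the previous source; iterating the outermost one
# pulls every token through the whole chain.
tokens = alphabeticalattributes.Filter(tokens)  # sorts attributes per tag
tokens = lint.Filter(tokens)                    # asserts stream invariants

print(serializer.HTMLSerializer().render(tokens))

Because base.Filter forwards unknown attribute lookups to its source, any filter in the chain also exposes the original stream's attributes untouched.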
- -from . import base - - -class Filter(base.Filter): - """Removes optional tags from the token stream""" - def slider(self): - previous1 = previous2 = None - for token in self.source: - if previous1 is not None: - yield previous2, previous1, token - previous2 = previous1 - previous1 = token - if previous1 is not None: - yield previous2, previous1, None - - def __iter__(self): - for previous, token, next in self.slider(): - type = token["type"] - if type == "StartTag": - if (token["data"] or - not self.is_optional_start(token["name"], previous, next)): - yield token - elif type == "EndTag": - if not self.is_optional_end(token["name"], next): - yield token - else: - yield token - - def is_optional_start(self, tagname, previous, next): - type = next and next["type"] or None - if tagname in 'html': - # An html element's start tag may be omitted if the first thing - # inside the html element is not a space character or a comment. - return type not in ("Comment", "SpaceCharacters") - elif tagname == 'head': - # A head element's start tag may be omitted if the first thing - # inside the head element is an element. - # XXX: we also omit the start tag if the head element is empty - if type in ("StartTag", "EmptyTag"): - return True - elif type == "EndTag": - return next["name"] == "head" - elif tagname == 'body': - # A body element's start tag may be omitted if the first thing - # inside the body element is not a space character or a comment, - # except if the first thing inside the body element is a script - # or style element and the node immediately preceding the body - # element is a head element whose end tag has been omitted. - if type in ("Comment", "SpaceCharacters"): - return False - elif type == "StartTag": - # XXX: we do not look at the preceding event, so we never omit - # the body element's start tag if it's followed by a script or - # a style element. - return next["name"] not in ('script', 'style') - else: - return True - elif tagname == 'colgroup': - # A colgroup element's start tag may be omitted if the first thing - # inside the colgroup element is a col element, and if the element - # is not immediately preceded by another colgroup element whose - # end tag has been omitted. - if type in ("StartTag", "EmptyTag"): - # XXX: we do not look at the preceding event, so instead we never - # omit the colgroup element's end tag when it is immediately - # followed by another colgroup element. See is_optional_end. - return next["name"] == "col" - else: - return False - elif tagname == 'tbody': - # A tbody element's start tag may be omitted if the first thing - # inside the tbody element is a tr element, and if the element is - # not immediately preceded by a tbody, thead, or tfoot element - # whose end tag has been omitted. - if type == "StartTag": - # omit the thead and tfoot elements' end tag when they are - # immediately followed by a tbody element. See is_optional_end. - if previous and previous['type'] == 'EndTag' and \ - previous['name'] in ('tbody', 'thead', 'tfoot'): - return False - return next["name"] == 'tr' - else: - return False - return False - - def is_optional_end(self, tagname, next): - type = next and next["type"] or None - if tagname in ('html', 'head', 'body'): - # An html element's end tag may be omitted if the html element - # is not immediately followed by a space character or a comment. 
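In normal use this tag-omission logic is reached through the serializer rather than called directly: HTMLSerializer's omit_optional_tags option (its default) routes the token stream through this filter. A small sketch, with the printed output shown as an illustrative comment:

import html5lib
from html5lib import getTreeWalker
from html5lib.serializer import HTMLSerializer

frag = html5lib.parseFragment('<ul><li>one</li><li>two</li></ul>')
walker = getTreeWalker("etree")(frag)
# omit_optional_tags=True applies the optionaltags filter during rendering.
print(HTMLSerializer(omit_optional_tags=True).render(walker))
# -> <ul><li>one<li>two</ul>  (each </li> is droppable before another <li>)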
- return type not in ("Comment", "SpaceCharacters") - elif tagname in ('li', 'optgroup', 'tr'): - # A li element's end tag may be omitted if the li element is - # immediately followed by another li element or if there is - # no more content in the parent element. - # An optgroup element's end tag may be omitted if the optgroup - # element is immediately followed by another optgroup element, - # or if there is no more content in the parent element. - # A tr element's end tag may be omitted if the tr element is - # immediately followed by another tr element, or if there is - # no more content in the parent element. - if type == "StartTag": - return next["name"] == tagname - else: - return type == "EndTag" or type is None - elif tagname in ('dt', 'dd'): - # A dt element's end tag may be omitted if the dt element is - # immediately followed by another dt element or a dd element. - # A dd element's end tag may be omitted if the dd element is - # immediately followed by another dd element or a dt element, - # or if there is no more content in the parent element. - if type == "StartTag": - return next["name"] in ('dt', 'dd') - elif tagname == 'dd': - return type == "EndTag" or type is None - else: - return False - elif tagname == 'p': - # A p element's end tag may be omitted if the p element is - # immediately followed by an address, article, aside, - # blockquote, datagrid, dialog, dir, div, dl, fieldset, - # footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu, - # nav, ol, p, pre, section, table, or ul, element, or if - # there is no more content in the parent element. - if type in ("StartTag", "EmptyTag"): - return next["name"] in ('address', 'article', 'aside', - 'blockquote', 'datagrid', 'dialog', - 'dir', 'div', 'dl', 'fieldset', 'footer', - 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', - 'header', 'hr', 'menu', 'nav', 'ol', - 'p', 'pre', 'section', 'table', 'ul') - else: - return type == "EndTag" or type is None - elif tagname == 'option': - # An option element's end tag may be omitted if the option - # element is immediately followed by another option element, - # or if it is immediately followed by an <code>optgroup</code> - # element, or if there is no more content in the parent - # element. - if type == "StartTag": - return next["name"] in ('option', 'optgroup') - else: - return type == "EndTag" or type is None - elif tagname in ('rt', 'rp'): - # An rt element's end tag may be omitted if the rt element is - # immediately followed by an rt or rp element, or if there is - # no more content in the parent element. - # An rp element's end tag may be omitted if the rp element is - # immediately followed by an rt or rp element, or if there is - # no more content in the parent element. - if type == "StartTag": - return next["name"] in ('rt', 'rp') - else: - return type == "EndTag" or type is None - elif tagname == 'colgroup': - # A colgroup element's end tag may be omitted if the colgroup - # element is not immediately followed by a space character or - # a comment. - if type in ("Comment", "SpaceCharacters"): - return False - elif type == "StartTag": - # XXX: we also look for an immediately following colgroup - # element. See is_optional_start. - return next["name"] != 'colgroup' - else: - return True - elif tagname in ('thead', 'tbody'): - # A thead element's end tag may be omitted if the thead element - # is immediately followed by a tbody or tfoot element. 
- # A tbody element's end tag may be omitted if the tbody element - # is immediately followed by a tbody or tfoot element, or if - # there is no more content in the parent element. - # A tfoot element's end tag may be omitted if the tfoot element - # is immediately followed by a tbody element, or if there is no - # more content in the parent element. - # XXX: we never omit the end tag when the following element is - # a tbody. See is_optional_start. - if type == "StartTag": - return next["name"] in ['tbody', 'tfoot'] - elif tagname == 'tbody': - return type == "EndTag" or type is None - else: - return False - elif tagname == 'tfoot': - # A tfoot element's end tag may be omitted if the tfoot element - # is immediately followed by a tbody element, or if there is no - # more content in the parent element. - # XXX: we never omit the end tag when the following element is - # a tbody. See is_optional_start. - if type == "StartTag": - return next["name"] == 'tbody' - else: - return type == "EndTag" or type is None - elif tagname in ('td', 'th'): - # A td element's end tag may be omitted if the td element is - # immediately followed by a td or th element, or if there is - # no more content in the parent element. - # A th element's end tag may be omitted if the th element is - # immediately followed by a td or th element, or if there is - # no more content in the parent element. - if type == "StartTag": - return next["name"] in ('td', 'th') - else: - return type == "EndTag" or type is None - return False diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/filters/sanitizer.py b/env/Lib/site-packages/pip/_vendor/html5lib/filters/sanitizer.py deleted file mode 100644 index aa7431d131213f85ab36cacc54b000e88898080b..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/html5lib/filters/sanitizer.py +++ /dev/null @@ -1,916 +0,0 @@ -"""Deprecated from html5lib 1.1. - -See `here <https://github.com/html5lib/html5lib-python/issues/443>`_ for -information about its deprecation; `Bleach <https://github.com/mozilla/bleach>`_ -is recommended as a replacement. Please let us know in the aforementioned issue -if Bleach is unsuitable for your needs. - -""" -from __future__ import absolute_import, division, unicode_literals - -import re -import warnings -from xml.sax.saxutils import escape, unescape - -from pip._vendor.six.moves import urllib_parse as urlparse - -from . 
import base -from ..constants import namespaces, prefixes - -__all__ = ["Filter"] - - -_deprecation_msg = ( - "html5lib's sanitizer is deprecated; see " + - "https://github.com/html5lib/html5lib-python/issues/443 and please let " + - "us know if Bleach is unsuitable for your needs" -) - -warnings.warn(_deprecation_msg, DeprecationWarning) - -allowed_elements = frozenset(( - (namespaces['html'], 'a'), - (namespaces['html'], 'abbr'), - (namespaces['html'], 'acronym'), - (namespaces['html'], 'address'), - (namespaces['html'], 'area'), - (namespaces['html'], 'article'), - (namespaces['html'], 'aside'), - (namespaces['html'], 'audio'), - (namespaces['html'], 'b'), - (namespaces['html'], 'big'), - (namespaces['html'], 'blockquote'), - (namespaces['html'], 'br'), - (namespaces['html'], 'button'), - (namespaces['html'], 'canvas'), - (namespaces['html'], 'caption'), - (namespaces['html'], 'center'), - (namespaces['html'], 'cite'), - (namespaces['html'], 'code'), - (namespaces['html'], 'col'), - (namespaces['html'], 'colgroup'), - (namespaces['html'], 'command'), - (namespaces['html'], 'datagrid'), - (namespaces['html'], 'datalist'), - (namespaces['html'], 'dd'), - (namespaces['html'], 'del'), - (namespaces['html'], 'details'), - (namespaces['html'], 'dfn'), - (namespaces['html'], 'dialog'), - (namespaces['html'], 'dir'), - (namespaces['html'], 'div'), - (namespaces['html'], 'dl'), - (namespaces['html'], 'dt'), - (namespaces['html'], 'em'), - (namespaces['html'], 'event-source'), - (namespaces['html'], 'fieldset'), - (namespaces['html'], 'figcaption'), - (namespaces['html'], 'figure'), - (namespaces['html'], 'footer'), - (namespaces['html'], 'font'), - (namespaces['html'], 'form'), - (namespaces['html'], 'header'), - (namespaces['html'], 'h1'), - (namespaces['html'], 'h2'), - (namespaces['html'], 'h3'), - (namespaces['html'], 'h4'), - (namespaces['html'], 'h5'), - (namespaces['html'], 'h6'), - (namespaces['html'], 'hr'), - (namespaces['html'], 'i'), - (namespaces['html'], 'img'), - (namespaces['html'], 'input'), - (namespaces['html'], 'ins'), - (namespaces['html'], 'keygen'), - (namespaces['html'], 'kbd'), - (namespaces['html'], 'label'), - (namespaces['html'], 'legend'), - (namespaces['html'], 'li'), - (namespaces['html'], 'm'), - (namespaces['html'], 'map'), - (namespaces['html'], 'menu'), - (namespaces['html'], 'meter'), - (namespaces['html'], 'multicol'), - (namespaces['html'], 'nav'), - (namespaces['html'], 'nextid'), - (namespaces['html'], 'ol'), - (namespaces['html'], 'output'), - (namespaces['html'], 'optgroup'), - (namespaces['html'], 'option'), - (namespaces['html'], 'p'), - (namespaces['html'], 'pre'), - (namespaces['html'], 'progress'), - (namespaces['html'], 'q'), - (namespaces['html'], 's'), - (namespaces['html'], 'samp'), - (namespaces['html'], 'section'), - (namespaces['html'], 'select'), - (namespaces['html'], 'small'), - (namespaces['html'], 'sound'), - (namespaces['html'], 'source'), - (namespaces['html'], 'spacer'), - (namespaces['html'], 'span'), - (namespaces['html'], 'strike'), - (namespaces['html'], 'strong'), - (namespaces['html'], 'sub'), - (namespaces['html'], 'sup'), - (namespaces['html'], 'table'), - (namespaces['html'], 'tbody'), - (namespaces['html'], 'td'), - (namespaces['html'], 'textarea'), - (namespaces['html'], 'time'), - (namespaces['html'], 'tfoot'), - (namespaces['html'], 'th'), - (namespaces['html'], 'thead'), - (namespaces['html'], 'tr'), - (namespaces['html'], 'tt'), - (namespaces['html'], 'u'), - (namespaces['html'], 'ul'), - (namespaces['html'], 'var'), - 
(namespaces['html'], 'video'), - (namespaces['mathml'], 'maction'), - (namespaces['mathml'], 'math'), - (namespaces['mathml'], 'merror'), - (namespaces['mathml'], 'mfrac'), - (namespaces['mathml'], 'mi'), - (namespaces['mathml'], 'mmultiscripts'), - (namespaces['mathml'], 'mn'), - (namespaces['mathml'], 'mo'), - (namespaces['mathml'], 'mover'), - (namespaces['mathml'], 'mpadded'), - (namespaces['mathml'], 'mphantom'), - (namespaces['mathml'], 'mprescripts'), - (namespaces['mathml'], 'mroot'), - (namespaces['mathml'], 'mrow'), - (namespaces['mathml'], 'mspace'), - (namespaces['mathml'], 'msqrt'), - (namespaces['mathml'], 'mstyle'), - (namespaces['mathml'], 'msub'), - (namespaces['mathml'], 'msubsup'), - (namespaces['mathml'], 'msup'), - (namespaces['mathml'], 'mtable'), - (namespaces['mathml'], 'mtd'), - (namespaces['mathml'], 'mtext'), - (namespaces['mathml'], 'mtr'), - (namespaces['mathml'], 'munder'), - (namespaces['mathml'], 'munderover'), - (namespaces['mathml'], 'none'), - (namespaces['svg'], 'a'), - (namespaces['svg'], 'animate'), - (namespaces['svg'], 'animateColor'), - (namespaces['svg'], 'animateMotion'), - (namespaces['svg'], 'animateTransform'), - (namespaces['svg'], 'clipPath'), - (namespaces['svg'], 'circle'), - (namespaces['svg'], 'defs'), - (namespaces['svg'], 'desc'), - (namespaces['svg'], 'ellipse'), - (namespaces['svg'], 'font-face'), - (namespaces['svg'], 'font-face-name'), - (namespaces['svg'], 'font-face-src'), - (namespaces['svg'], 'g'), - (namespaces['svg'], 'glyph'), - (namespaces['svg'], 'hkern'), - (namespaces['svg'], 'linearGradient'), - (namespaces['svg'], 'line'), - (namespaces['svg'], 'marker'), - (namespaces['svg'], 'metadata'), - (namespaces['svg'], 'missing-glyph'), - (namespaces['svg'], 'mpath'), - (namespaces['svg'], 'path'), - (namespaces['svg'], 'polygon'), - (namespaces['svg'], 'polyline'), - (namespaces['svg'], 'radialGradient'), - (namespaces['svg'], 'rect'), - (namespaces['svg'], 'set'), - (namespaces['svg'], 'stop'), - (namespaces['svg'], 'svg'), - (namespaces['svg'], 'switch'), - (namespaces['svg'], 'text'), - (namespaces['svg'], 'title'), - (namespaces['svg'], 'tspan'), - (namespaces['svg'], 'use'), -)) - -allowed_attributes = frozenset(( - # HTML attributes - (None, 'abbr'), - (None, 'accept'), - (None, 'accept-charset'), - (None, 'accesskey'), - (None, 'action'), - (None, 'align'), - (None, 'alt'), - (None, 'autocomplete'), - (None, 'autofocus'), - (None, 'axis'), - (None, 'background'), - (None, 'balance'), - (None, 'bgcolor'), - (None, 'bgproperties'), - (None, 'border'), - (None, 'bordercolor'), - (None, 'bordercolordark'), - (None, 'bordercolorlight'), - (None, 'bottompadding'), - (None, 'cellpadding'), - (None, 'cellspacing'), - (None, 'ch'), - (None, 'challenge'), - (None, 'char'), - (None, 'charoff'), - (None, 'choff'), - (None, 'charset'), - (None, 'checked'), - (None, 'cite'), - (None, 'class'), - (None, 'clear'), - (None, 'color'), - (None, 'cols'), - (None, 'colspan'), - (None, 'compact'), - (None, 'contenteditable'), - (None, 'controls'), - (None, 'coords'), - (None, 'data'), - (None, 'datafld'), - (None, 'datapagesize'), - (None, 'datasrc'), - (None, 'datetime'), - (None, 'default'), - (None, 'delay'), - (None, 'dir'), - (None, 'disabled'), - (None, 'draggable'), - (None, 'dynsrc'), - (None, 'enctype'), - (None, 'end'), - (None, 'face'), - (None, 'for'), - (None, 'form'), - (None, 'frame'), - (None, 'galleryimg'), - (None, 'gutter'), - (None, 'headers'), - (None, 'height'), - (None, 'hidefocus'), - (None, 'hidden'), - (None, 
'high'), - (None, 'href'), - (None, 'hreflang'), - (None, 'hspace'), - (None, 'icon'), - (None, 'id'), - (None, 'inputmode'), - (None, 'ismap'), - (None, 'keytype'), - (None, 'label'), - (None, 'leftspacing'), - (None, 'lang'), - (None, 'list'), - (None, 'longdesc'), - (None, 'loop'), - (None, 'loopcount'), - (None, 'loopend'), - (None, 'loopstart'), - (None, 'low'), - (None, 'lowsrc'), - (None, 'max'), - (None, 'maxlength'), - (None, 'media'), - (None, 'method'), - (None, 'min'), - (None, 'multiple'), - (None, 'name'), - (None, 'nohref'), - (None, 'noshade'), - (None, 'nowrap'), - (None, 'open'), - (None, 'optimum'), - (None, 'pattern'), - (None, 'ping'), - (None, 'point-size'), - (None, 'poster'), - (None, 'pqg'), - (None, 'preload'), - (None, 'prompt'), - (None, 'radiogroup'), - (None, 'readonly'), - (None, 'rel'), - (None, 'repeat-max'), - (None, 'repeat-min'), - (None, 'replace'), - (None, 'required'), - (None, 'rev'), - (None, 'rightspacing'), - (None, 'rows'), - (None, 'rowspan'), - (None, 'rules'), - (None, 'scope'), - (None, 'selected'), - (None, 'shape'), - (None, 'size'), - (None, 'span'), - (None, 'src'), - (None, 'start'), - (None, 'step'), - (None, 'style'), - (None, 'summary'), - (None, 'suppress'), - (None, 'tabindex'), - (None, 'target'), - (None, 'template'), - (None, 'title'), - (None, 'toppadding'), - (None, 'type'), - (None, 'unselectable'), - (None, 'usemap'), - (None, 'urn'), - (None, 'valign'), - (None, 'value'), - (None, 'variable'), - (None, 'volume'), - (None, 'vspace'), - (None, 'vrml'), - (None, 'width'), - (None, 'wrap'), - (namespaces['xml'], 'lang'), - # MathML attributes - (None, 'actiontype'), - (None, 'align'), - (None, 'columnalign'), - (None, 'columnalign'), - (None, 'columnalign'), - (None, 'columnlines'), - (None, 'columnspacing'), - (None, 'columnspan'), - (None, 'depth'), - (None, 'display'), - (None, 'displaystyle'), - (None, 'equalcolumns'), - (None, 'equalrows'), - (None, 'fence'), - (None, 'fontstyle'), - (None, 'fontweight'), - (None, 'frame'), - (None, 'height'), - (None, 'linethickness'), - (None, 'lspace'), - (None, 'mathbackground'), - (None, 'mathcolor'), - (None, 'mathvariant'), - (None, 'mathvariant'), - (None, 'maxsize'), - (None, 'minsize'), - (None, 'other'), - (None, 'rowalign'), - (None, 'rowalign'), - (None, 'rowalign'), - (None, 'rowlines'), - (None, 'rowspacing'), - (None, 'rowspan'), - (None, 'rspace'), - (None, 'scriptlevel'), - (None, 'selection'), - (None, 'separator'), - (None, 'stretchy'), - (None, 'width'), - (None, 'width'), - (namespaces['xlink'], 'href'), - (namespaces['xlink'], 'show'), - (namespaces['xlink'], 'type'), - # SVG attributes - (None, 'accent-height'), - (None, 'accumulate'), - (None, 'additive'), - (None, 'alphabetic'), - (None, 'arabic-form'), - (None, 'ascent'), - (None, 'attributeName'), - (None, 'attributeType'), - (None, 'baseProfile'), - (None, 'bbox'), - (None, 'begin'), - (None, 'by'), - (None, 'calcMode'), - (None, 'cap-height'), - (None, 'class'), - (None, 'clip-path'), - (None, 'color'), - (None, 'color-rendering'), - (None, 'content'), - (None, 'cx'), - (None, 'cy'), - (None, 'd'), - (None, 'dx'), - (None, 'dy'), - (None, 'descent'), - (None, 'display'), - (None, 'dur'), - (None, 'end'), - (None, 'fill'), - (None, 'fill-opacity'), - (None, 'fill-rule'), - (None, 'font-family'), - (None, 'font-size'), - (None, 'font-stretch'), - (None, 'font-style'), - (None, 'font-variant'), - (None, 'font-weight'), - (None, 'from'), - (None, 'fx'), - (None, 'fy'), - (None, 'g1'), - (None, 'g2'), - (None, 
'glyph-name'), - (None, 'gradientUnits'), - (None, 'hanging'), - (None, 'height'), - (None, 'horiz-adv-x'), - (None, 'horiz-origin-x'), - (None, 'id'), - (None, 'ideographic'), - (None, 'k'), - (None, 'keyPoints'), - (None, 'keySplines'), - (None, 'keyTimes'), - (None, 'lang'), - (None, 'marker-end'), - (None, 'marker-mid'), - (None, 'marker-start'), - (None, 'markerHeight'), - (None, 'markerUnits'), - (None, 'markerWidth'), - (None, 'mathematical'), - (None, 'max'), - (None, 'min'), - (None, 'name'), - (None, 'offset'), - (None, 'opacity'), - (None, 'orient'), - (None, 'origin'), - (None, 'overline-position'), - (None, 'overline-thickness'), - (None, 'panose-1'), - (None, 'path'), - (None, 'pathLength'), - (None, 'points'), - (None, 'preserveAspectRatio'), - (None, 'r'), - (None, 'refX'), - (None, 'refY'), - (None, 'repeatCount'), - (None, 'repeatDur'), - (None, 'requiredExtensions'), - (None, 'requiredFeatures'), - (None, 'restart'), - (None, 'rotate'), - (None, 'rx'), - (None, 'ry'), - (None, 'slope'), - (None, 'stemh'), - (None, 'stemv'), - (None, 'stop-color'), - (None, 'stop-opacity'), - (None, 'strikethrough-position'), - (None, 'strikethrough-thickness'), - (None, 'stroke'), - (None, 'stroke-dasharray'), - (None, 'stroke-dashoffset'), - (None, 'stroke-linecap'), - (None, 'stroke-linejoin'), - (None, 'stroke-miterlimit'), - (None, 'stroke-opacity'), - (None, 'stroke-width'), - (None, 'systemLanguage'), - (None, 'target'), - (None, 'text-anchor'), - (None, 'to'), - (None, 'transform'), - (None, 'type'), - (None, 'u1'), - (None, 'u2'), - (None, 'underline-position'), - (None, 'underline-thickness'), - (None, 'unicode'), - (None, 'unicode-range'), - (None, 'units-per-em'), - (None, 'values'), - (None, 'version'), - (None, 'viewBox'), - (None, 'visibility'), - (None, 'width'), - (None, 'widths'), - (None, 'x'), - (None, 'x-height'), - (None, 'x1'), - (None, 'x2'), - (namespaces['xlink'], 'actuate'), - (namespaces['xlink'], 'arcrole'), - (namespaces['xlink'], 'href'), - (namespaces['xlink'], 'role'), - (namespaces['xlink'], 'show'), - (namespaces['xlink'], 'title'), - (namespaces['xlink'], 'type'), - (namespaces['xml'], 'base'), - (namespaces['xml'], 'lang'), - (namespaces['xml'], 'space'), - (None, 'y'), - (None, 'y1'), - (None, 'y2'), - (None, 'zoomAndPan'), -)) - -attr_val_is_uri = frozenset(( - (None, 'href'), - (None, 'src'), - (None, 'cite'), - (None, 'action'), - (None, 'longdesc'), - (None, 'poster'), - (None, 'background'), - (None, 'datasrc'), - (None, 'dynsrc'), - (None, 'lowsrc'), - (None, 'ping'), - (namespaces['xlink'], 'href'), - (namespaces['xml'], 'base'), -)) - -svg_attr_val_allows_ref = frozenset(( - (None, 'clip-path'), - (None, 'color-profile'), - (None, 'cursor'), - (None, 'fill'), - (None, 'filter'), - (None, 'marker'), - (None, 'marker-start'), - (None, 'marker-mid'), - (None, 'marker-end'), - (None, 'mask'), - (None, 'stroke'), -)) - -svg_allow_local_href = frozenset(( - (None, 'altGlyph'), - (None, 'animate'), - (None, 'animateColor'), - (None, 'animateMotion'), - (None, 'animateTransform'), - (None, 'cursor'), - (None, 'feImage'), - (None, 'filter'), - (None, 'linearGradient'), - (None, 'pattern'), - (None, 'radialGradient'), - (None, 'textpath'), - (None, 'tref'), - (None, 'set'), - (None, 'use') -)) - -allowed_css_properties = frozenset(( - 'azimuth', - 'background-color', - 'border-bottom-color', - 'border-collapse', - 'border-color', - 'border-left-color', - 'border-right-color', - 'border-top-color', - 'clear', - 'color', - 'cursor', - 'direction', - 
'display', - 'elevation', - 'float', - 'font', - 'font-family', - 'font-size', - 'font-style', - 'font-variant', - 'font-weight', - 'height', - 'letter-spacing', - 'line-height', - 'overflow', - 'pause', - 'pause-after', - 'pause-before', - 'pitch', - 'pitch-range', - 'richness', - 'speak', - 'speak-header', - 'speak-numeral', - 'speak-punctuation', - 'speech-rate', - 'stress', - 'text-align', - 'text-decoration', - 'text-indent', - 'unicode-bidi', - 'vertical-align', - 'voice-family', - 'volume', - 'white-space', - 'width', -)) - -allowed_css_keywords = frozenset(( - 'auto', - 'aqua', - 'black', - 'block', - 'blue', - 'bold', - 'both', - 'bottom', - 'brown', - 'center', - 'collapse', - 'dashed', - 'dotted', - 'fuchsia', - 'gray', - 'green', - '!important', - 'italic', - 'left', - 'lime', - 'maroon', - 'medium', - 'none', - 'navy', - 'normal', - 'nowrap', - 'olive', - 'pointer', - 'purple', - 'red', - 'right', - 'solid', - 'silver', - 'teal', - 'top', - 'transparent', - 'underline', - 'white', - 'yellow', -)) - -allowed_svg_properties = frozenset(( - 'fill', - 'fill-opacity', - 'fill-rule', - 'stroke', - 'stroke-width', - 'stroke-linecap', - 'stroke-linejoin', - 'stroke-opacity', -)) - -allowed_protocols = frozenset(( - 'ed2k', - 'ftp', - 'http', - 'https', - 'irc', - 'mailto', - 'news', - 'gopher', - 'nntp', - 'telnet', - 'webcal', - 'xmpp', - 'callto', - 'feed', - 'urn', - 'aim', - 'rsync', - 'tag', - 'ssh', - 'sftp', - 'rtsp', - 'afs', - 'data', -)) - -allowed_content_types = frozenset(( - 'image/png', - 'image/jpeg', - 'image/gif', - 'image/webp', - 'image/bmp', - 'text/plain', -)) - - -data_content_type = re.compile(r''' - ^ - # Match a content type <application>/<type> - (?P<content_type>[-a-zA-Z0-9.]+/[-a-zA-Z0-9.]+) - # Match any character set and encoding - (?:(?:;charset=(?:[-a-zA-Z0-9]+)(?:;(?:base64))?) - |(?:;(?:base64))?(?:;charset=(?:[-a-zA-Z0-9]+))?) - # Assume the rest is data - ,.* - $ - ''', - re.VERBOSE) - - -class Filter(base.Filter): - """Sanitizes token stream of XHTML+MathML+SVG and of inline style attributes""" - def __init__(self, - source, - allowed_elements=allowed_elements, - allowed_attributes=allowed_attributes, - allowed_css_properties=allowed_css_properties, - allowed_css_keywords=allowed_css_keywords, - allowed_svg_properties=allowed_svg_properties, - allowed_protocols=allowed_protocols, - allowed_content_types=allowed_content_types, - attr_val_is_uri=attr_val_is_uri, - svg_attr_val_allows_ref=svg_attr_val_allows_ref, - svg_allow_local_href=svg_allow_local_href): - """Creates a Filter - - :arg allowed_elements: set of elements to allow--everything else will - be escaped - - :arg allowed_attributes: set of attributes to allow in - elements--everything else will be stripped - - :arg allowed_css_properties: set of CSS properties to allow--everything - else will be stripped - - :arg allowed_css_keywords: set of CSS keywords to allow--everything - else will be stripped - - :arg allowed_svg_properties: set of SVG properties to allow--everything - else will be removed - - :arg allowed_protocols: set of allowed protocols for URIs - - :arg allowed_content_types: set of allowed content types for ``data`` URIs. 
- - :arg attr_val_is_uri: set of attributes that have URI values--values - that have a scheme not listed in ``allowed_protocols`` are removed - - :arg svg_attr_val_allows_ref: set of SVG attributes that can have - references - - :arg svg_allow_local_href: set of SVG elements that can have local - hrefs--these are removed - - """ - super(Filter, self).__init__(source) - - warnings.warn(_deprecation_msg, DeprecationWarning) - - self.allowed_elements = allowed_elements - self.allowed_attributes = allowed_attributes - self.allowed_css_properties = allowed_css_properties - self.allowed_css_keywords = allowed_css_keywords - self.allowed_svg_properties = allowed_svg_properties - self.allowed_protocols = allowed_protocols - self.allowed_content_types = allowed_content_types - self.attr_val_is_uri = attr_val_is_uri - self.svg_attr_val_allows_ref = svg_attr_val_allows_ref - self.svg_allow_local_href = svg_allow_local_href - - def __iter__(self): - for token in base.Filter.__iter__(self): - token = self.sanitize_token(token) - if token: - yield token - - # Sanitize the +html+, escaping all elements not in ALLOWED_ELEMENTS, and - # stripping out all attributes not in ALLOWED_ATTRIBUTES. Style attributes - # are parsed, and a restricted set, specified by ALLOWED_CSS_PROPERTIES and - # ALLOWED_CSS_KEYWORDS, are allowed through. attributes in ATTR_VAL_IS_URI - # are scanned, and only URI schemes specified in ALLOWED_PROTOCOLS are - # allowed. - # - # sanitize_html('<script> do_nasty_stuff() </script>') - # => <script> do_nasty_stuff() </script> - # sanitize_html('<a href="javascript: sucker();">Click here for $100</a>') - # => <a>Click here for $100</a> - def sanitize_token(self, token): - - # accommodate filters which use token_type differently - token_type = token["type"] - if token_type in ("StartTag", "EndTag", "EmptyTag"): - name = token["name"] - namespace = token["namespace"] - if ((namespace, name) in self.allowed_elements or - (namespace is None and - (namespaces["html"], name) in self.allowed_elements)): - return self.allowed_token(token) - else: - return self.disallowed_token(token) - elif token_type == "Comment": - pass - else: - return token - - def allowed_token(self, token): - if "data" in token: - attrs = token["data"] - attr_names = set(attrs.keys()) - - # Remove forbidden attributes - for to_remove in (attr_names - self.allowed_attributes): - del token["data"][to_remove] - attr_names.remove(to_remove) - - # Remove attributes with disallowed URL values - for attr in (attr_names & self.attr_val_is_uri): - assert attr in attrs - # I don't have a clue where this regexp comes from or why it matches those - # characters, nor why we call unescape. I just know it's always been here. - # Should you be worried by this comment in a sanitizer? Yes. On the other hand, all - # this will do is remove *more* than it otherwise would. 
- val_unescaped = re.sub("[`\x00-\x20\x7f-\xa0\\s]+", '', - unescape(attrs[attr])).lower() - # remove replacement characters from unescaped characters - val_unescaped = val_unescaped.replace("\ufffd", "") - try: - uri = urlparse.urlparse(val_unescaped) - except ValueError: - uri = None - del attrs[attr] - if uri and uri.scheme: - if uri.scheme not in self.allowed_protocols: - del attrs[attr] - if uri.scheme == 'data': - m = data_content_type.match(uri.path) - if not m: - del attrs[attr] - elif m.group('content_type') not in self.allowed_content_types: - del attrs[attr] - - for attr in self.svg_attr_val_allows_ref: - if attr in attrs: - attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)', - ' ', - unescape(attrs[attr])) - if (token["name"] in self.svg_allow_local_href and - (namespaces['xlink'], 'href') in attrs and re.search(r'^\s*[^#\s].*', - attrs[(namespaces['xlink'], 'href')])): - del attrs[(namespaces['xlink'], 'href')] - if (None, 'style') in attrs: - attrs[(None, 'style')] = self.sanitize_css(attrs[(None, 'style')]) - token["data"] = attrs - return token - - def disallowed_token(self, token): - token_type = token["type"] - if token_type == "EndTag": - token["data"] = "</%s>" % token["name"] - elif token["data"]: - assert token_type in ("StartTag", "EmptyTag") - attrs = [] - for (ns, name), v in token["data"].items(): - attrs.append(' %s="%s"' % (name if ns is None else "%s:%s" % (prefixes[ns], name), escape(v))) - token["data"] = "<%s%s>" % (token["name"], ''.join(attrs)) - else: - token["data"] = "<%s>" % token["name"] - if token.get("selfClosing"): - token["data"] = token["data"][:-1] + "/>" - - token["type"] = "Characters" - - del token["name"] - return token - - def sanitize_css(self, style): - # disallow urls - style = re.compile(r'url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style) - - # gauntlet - if not re.match(r"""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style): - return '' - if not re.match(r"^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style): - return '' - - clean = [] - for prop, value in re.findall(r"([-\w]+)\s*:\s*([^:;]*)", style): - if not value: - continue - if prop.lower() in self.allowed_css_properties: - clean.append(prop + ': ' + value + ';') - elif prop.split('-')[0].lower() in ['background', 'border', 'margin', - 'padding']: - for keyword in value.split(): - if keyword not in self.allowed_css_keywords and \ - not re.match(r"^(#[0-9a-fA-F]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$", keyword): # noqa - break - else: - clean.append(prop + ': ' + value + ';') - elif prop.lower() in self.allowed_svg_properties: - clean.append(prop + ': ' + value + ';') - - return ' '.join(clean) diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/filters/whitespace.py b/env/Lib/site-packages/pip/_vendor/html5lib/filters/whitespace.py deleted file mode 100644 index 0d12584b45995d35110f75af00193fdad0fa10f4..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/html5lib/filters/whitespace.py +++ /dev/null @@ -1,38 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -import re - -from . 
import base -from ..constants import rcdataElements, spaceCharacters -spaceCharacters = "".join(spaceCharacters) - -SPACES_REGEX = re.compile("[%s]+" % spaceCharacters) - - -class Filter(base.Filter): - """Collapses whitespace except in pre, textarea, and script elements""" - spacePreserveElements = frozenset(["pre", "textarea"] + list(rcdataElements)) - - def __iter__(self): - preserve = 0 - for token in base.Filter.__iter__(self): - type = token["type"] - if type == "StartTag" \ - and (preserve or token["name"] in self.spacePreserveElements): - preserve += 1 - - elif type == "EndTag" and preserve: - preserve -= 1 - - elif not preserve and type == "SpaceCharacters" and token["data"]: - # Test on token["data"] above to not introduce spaces where there were not - token["data"] = " " - - elif not preserve and type == "Characters": - token["data"] = collapse_spaces(token["data"]) - - yield token - - -def collapse_spaces(text): - return SPACES_REGEX.sub(' ', text) diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/html5parser.py b/env/Lib/site-packages/pip/_vendor/html5lib/html5parser.py deleted file mode 100644 index d06784f3d254176d1bd125cfd4d3af7f13005387..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/html5lib/html5parser.py +++ /dev/null @@ -1,2795 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals -from pip._vendor.six import with_metaclass, viewkeys - -import types - -from . import _inputstream -from . import _tokenizer - -from . import treebuilders -from .treebuilders.base import Marker - -from . import _utils -from .constants import ( - spaceCharacters, asciiUpper2Lower, - specialElements, headingElements, cdataElements, rcdataElements, - tokenTypes, tagTokenTypes, - namespaces, - htmlIntegrationPointElements, mathmlTextIntegrationPointElements, - adjustForeignAttributes as adjustForeignAttributesMap, - adjustMathMLAttributes, adjustSVGAttributes, - E, - _ReparseException -) - - -def parse(doc, treebuilder="etree", namespaceHTMLElements=True, **kwargs): - """Parse an HTML document as a string or file-like object into a tree - - :arg doc: the document to parse as a string or file-like object - - :arg treebuilder: the treebuilder to use when parsing - - :arg namespaceHTMLElements: whether or not to namespace HTML elements - - :returns: parsed tree - - Example: - - >>> from html5lib.html5parser import parse - >>> parse('<html><body><p>This is a doc</p></body></html>') - <Element u'{http://www.w3.org/1999/xhtml}html' at 0x7feac4909db0> - - """ - tb = treebuilders.getTreeBuilder(treebuilder) - p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements) - return p.parse(doc, **kwargs) - - -def parseFragment(doc, container="div", treebuilder="etree", namespaceHTMLElements=True, **kwargs): - """Parse an HTML fragment as a string or file-like object into a tree - - :arg doc: the fragment to parse as a string or file-like object - - :arg container: the container context to parse the fragment in - - :arg treebuilder: the treebuilder to use when parsing - - :arg namespaceHTMLElements: whether or not to namespace HTML elements - - :returns: parsed tree - - Example: - - >>> from html5lib.html5libparser import parseFragment - >>> parseFragment('<b>this is a fragment</b>') - <Element u'DOCUMENT_FRAGMENT' at 0x7feac484b090> - - """ - tb = treebuilders.getTreeBuilder(treebuilder) - p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements) - return p.parseFragment(doc, container=container, **kwargs) - - -def 
method_decorator_metaclass(function): - class Decorated(type): - def __new__(meta, classname, bases, classDict): - for attributeName, attribute in classDict.items(): - if isinstance(attribute, types.FunctionType): - attribute = function(attribute) - - classDict[attributeName] = attribute - return type.__new__(meta, classname, bases, classDict) - return Decorated - - -class HTMLParser(object): - """HTML parser - - Generates a tree structure from a stream of (possibly malformed) HTML. - - """ - - def __init__(self, tree=None, strict=False, namespaceHTMLElements=True, debug=False): - """ - :arg tree: a treebuilder class controlling the type of tree that will be - returned. Built in treebuilders can be accessed through - html5lib.treebuilders.getTreeBuilder(treeType) - - :arg strict: raise an exception when a parse error is encountered - - :arg namespaceHTMLElements: whether or not to namespace HTML elements - - :arg debug: whether or not to enable debug mode which logs things - - Example: - - >>> from html5lib.html5parser import HTMLParser - >>> parser = HTMLParser() # generates parser with etree builder - >>> parser = HTMLParser('lxml', strict=True) # generates parser with lxml builder which is strict - - """ - - # Raise an exception on the first error encountered - self.strict = strict - - if tree is None: - tree = treebuilders.getTreeBuilder("etree") - self.tree = tree(namespaceHTMLElements) - self.errors = [] - - self.phases = {name: cls(self, self.tree) for name, cls in - getPhases(debug).items()} - - def _parse(self, stream, innerHTML=False, container="div", scripting=False, **kwargs): - - self.innerHTMLMode = innerHTML - self.container = container - self.scripting = scripting - self.tokenizer = _tokenizer.HTMLTokenizer(stream, parser=self, **kwargs) - self.reset() - - try: - self.mainLoop() - except _ReparseException: - self.reset() - self.mainLoop() - - def reset(self): - self.tree.reset() - self.firstStartTag = False - self.errors = [] - self.log = [] # only used with debug mode - # "quirks" / "limited quirks" / "no quirks" - self.compatMode = "no quirks" - - if self.innerHTMLMode: - self.innerHTML = self.container.lower() - - if self.innerHTML in cdataElements: - self.tokenizer.state = self.tokenizer.rcdataState - elif self.innerHTML in rcdataElements: - self.tokenizer.state = self.tokenizer.rawtextState - elif self.innerHTML == 'plaintext': - self.tokenizer.state = self.tokenizer.plaintextState - else: - # state already is data state - # self.tokenizer.state = self.tokenizer.dataState - pass - self.phase = self.phases["beforeHtml"] - self.phase.insertHtmlElement() - self.resetInsertionMode() - else: - self.innerHTML = False # pylint:disable=redefined-variable-type - self.phase = self.phases["initial"] - - self.lastPhase = None - - self.beforeRCDataPhase = None - - self.framesetOK = True - - @property - def documentEncoding(self): - """Name of the character encoding that was used to decode the input stream, or - :obj:`None` if that is not determined yet - - """ - if not hasattr(self, 'tokenizer'): - return None - return self.tokenizer.stream.charEncoding[0].name - - def isHTMLIntegrationPoint(self, element): - if (element.name == "annotation-xml" and - element.namespace == namespaces["mathml"]): - return ("encoding" in element.attributes and - element.attributes["encoding"].translate( - asciiUpper2Lower) in - ("text/html", "application/xhtml+xml")) - else: - return (element.namespace, element.name) in htmlIntegrationPointElements - - def isMathMLTextIntegrationPoint(self, element): 
- return (element.namespace, element.name) in mathmlTextIntegrationPointElements - - def mainLoop(self): - CharactersToken = tokenTypes["Characters"] - SpaceCharactersToken = tokenTypes["SpaceCharacters"] - StartTagToken = tokenTypes["StartTag"] - EndTagToken = tokenTypes["EndTag"] - CommentToken = tokenTypes["Comment"] - DoctypeToken = tokenTypes["Doctype"] - ParseErrorToken = tokenTypes["ParseError"] - - for token in self.tokenizer: - prev_token = None - new_token = token - while new_token is not None: - prev_token = new_token - currentNode = self.tree.openElements[-1] if self.tree.openElements else None - currentNodeNamespace = currentNode.namespace if currentNode else None - currentNodeName = currentNode.name if currentNode else None - - type = new_token["type"] - - if type == ParseErrorToken: - self.parseError(new_token["data"], new_token.get("datavars", {})) - new_token = None - else: - if (len(self.tree.openElements) == 0 or - currentNodeNamespace == self.tree.defaultNamespace or - (self.isMathMLTextIntegrationPoint(currentNode) and - ((type == StartTagToken and - token["name"] not in frozenset(["mglyph", "malignmark"])) or - type in (CharactersToken, SpaceCharactersToken))) or - (currentNodeNamespace == namespaces["mathml"] and - currentNodeName == "annotation-xml" and - type == StartTagToken and - token["name"] == "svg") or - (self.isHTMLIntegrationPoint(currentNode) and - type in (StartTagToken, CharactersToken, SpaceCharactersToken))): - phase = self.phase - else: - phase = self.phases["inForeignContent"] - - if type == CharactersToken: - new_token = phase.processCharacters(new_token) - elif type == SpaceCharactersToken: - new_token = phase.processSpaceCharacters(new_token) - elif type == StartTagToken: - new_token = phase.processStartTag(new_token) - elif type == EndTagToken: - new_token = phase.processEndTag(new_token) - elif type == CommentToken: - new_token = phase.processComment(new_token) - elif type == DoctypeToken: - new_token = phase.processDoctype(new_token) - - if (type == StartTagToken and prev_token["selfClosing"] and - not prev_token["selfClosingAcknowledged"]): - self.parseError("non-void-element-with-trailing-solidus", - {"name": prev_token["name"]}) - - # When the loop finishes it's EOF - reprocess = True - phases = [] - while reprocess: - phases.append(self.phase) - reprocess = self.phase.processEOF() - if reprocess: - assert self.phase not in phases - - def parse(self, stream, *args, **kwargs): - """Parse a HTML document into a well-formed tree - - :arg stream: a file-like object or string containing the HTML to be parsed - - The optional encoding parameter must be a string that indicates - the encoding. If specified, that encoding will be used, - regardless of any BOM or later declaration (such as in a meta - element). 
- - :arg scripting: treat noscript elements as if JavaScript was turned on - - :returns: parsed tree - - Example: - - >>> from html5lib.html5parser import HTMLParser - >>> parser = HTMLParser() - >>> parser.parse('<html><body><p>This is a doc</p></body></html>') - <Element u'{http://www.w3.org/1999/xhtml}html' at 0x7feac4909db0> - - """ - self._parse(stream, False, None, *args, **kwargs) - return self.tree.getDocument() - - def parseFragment(self, stream, *args, **kwargs): - """Parse an HTML fragment into a well-formed tree fragment - - :arg container: name of the element whose innerHTML property we are setting; - if set to None, defaults to 'div' - - :arg stream: a file-like object or string containing the HTML to be parsed - - The optional encoding parameter must be a string that indicates - the encoding. If specified, that encoding will be used, - regardless of any BOM or later declaration (such as in a meta - element) - - :arg scripting: treat noscript elements as if JavaScript was turned on - - :returns: parsed tree - - Example: - - >>> from html5lib.html5parser import HTMLParser - >>> parser = HTMLParser() - >>> parser.parseFragment('<b>this is a fragment</b>') - <Element u'DOCUMENT_FRAGMENT' at 0x7feac484b090> - - """ - self._parse(stream, True, *args, **kwargs) - return self.tree.getFragment() - - def parseError(self, errorcode="XXX-undefined-error", datavars=None): - # XXX The idea is to make errorcode mandatory. - if datavars is None: - datavars = {} - self.errors.append((self.tokenizer.stream.position(), errorcode, datavars)) - if self.strict: - raise ParseError(E[errorcode] % datavars) - - def adjustMathMLAttributes(self, token): - adjust_attributes(token, adjustMathMLAttributes) - - def adjustSVGAttributes(self, token): - adjust_attributes(token, adjustSVGAttributes) - - def adjustForeignAttributes(self, token): - adjust_attributes(token, adjustForeignAttributesMap) - - def reparseTokenNormal(self, token): - # pylint:disable=unused-argument - self.parser.phase() - - def resetInsertionMode(self): - # The name of this method is mostly historical. (It's also used in the - # specification.)
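Both parse methods are also exposed through the module-level parse and parseFragment helpers defined near the top of this file; a quick usage sketch against html5lib's public surface (the exact element repr varies by Python and etree version):

import html5lib

doc = html5lib.parse('<html><body><p>This is a doc</p></body></html>')
frag = html5lib.parseFragment('<b>this is a fragment</b>', container='div')

# With the default etree builder and namespaceHTMLElements=True:
print(doc.tag)  # {http://www.w3.org/1999/xhtml}html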
- last = False - newModes = { - "select": "inSelect", - "td": "inCell", - "th": "inCell", - "tr": "inRow", - "tbody": "inTableBody", - "thead": "inTableBody", - "tfoot": "inTableBody", - "caption": "inCaption", - "colgroup": "inColumnGroup", - "table": "inTable", - "head": "inBody", - "body": "inBody", - "frameset": "inFrameset", - "html": "beforeHead" - } - for node in self.tree.openElements[::-1]: - nodeName = node.name - new_phase = None - if node == self.tree.openElements[0]: - assert self.innerHTML - last = True - nodeName = self.innerHTML - # Check for conditions that should only happen in the innerHTML - # case - if nodeName in ("select", "colgroup", "head", "html"): - assert self.innerHTML - - if not last and node.namespace != self.tree.defaultNamespace: - continue - - if nodeName in newModes: - new_phase = self.phases[newModes[nodeName]] - break - elif last: - new_phase = self.phases["inBody"] - break - - self.phase = new_phase - - def parseRCDataRawtext(self, token, contentType): - # Generic RCDATA/RAWTEXT Parsing algorithm - assert contentType in ("RAWTEXT", "RCDATA") - - self.tree.insertElement(token) - - if contentType == "RAWTEXT": - self.tokenizer.state = self.tokenizer.rawtextState - else: - self.tokenizer.state = self.tokenizer.rcdataState - - self.originalPhase = self.phase - - self.phase = self.phases["text"] - - -@_utils.memoize -def getPhases(debug): - def log(function): - """Logger that records which phase processes each token""" - type_names = {value: key for key, value in tokenTypes.items()} - - def wrapped(self, *args, **kwargs): - if function.__name__.startswith("process") and len(args) > 0: - token = args[0] - info = {"type": type_names[token['type']]} - if token['type'] in tagTokenTypes: - info["name"] = token['name'] - - self.parser.log.append((self.parser.tokenizer.state.__name__, - self.parser.phase.__class__.__name__, - self.__class__.__name__, - function.__name__, - info)) - return function(self, *args, **kwargs) - else: - return function(self, *args, **kwargs) - return wrapped - - def getMetaclass(use_metaclass, metaclass_func): - if use_metaclass: - return method_decorator_metaclass(metaclass_func) - else: - return type - - # pylint:disable=unused-argument - class Phase(with_metaclass(getMetaclass(debug, log))): - """Base class for helper object that implements each phase of processing - """ - __slots__ = ("parser", "tree", "__startTagCache", "__endTagCache") - - def __init__(self, parser, tree): - self.parser = parser - self.tree = tree - self.__startTagCache = {} - self.__endTagCache = {} - - def processEOF(self): - raise NotImplementedError - - def processComment(self, token): - # For most phases the following is correct. Where it's not it will be - # overridden. 
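The per-phase handler caching in processStartTag/processEndTag below reduces to the pattern sketched here; the names are illustrative, and in the real code startTagHandler is a dispatcher that supplies a default handler for unknown tag names rather than raising KeyError.

def cached_dispatch(cache, handlers, name, token):
    # Resolve the handler once per tag name, then reuse it.
    func = cache.get(name)
    if func is None:
        func = cache[name] = handlers[name]
        # Keep the cache bounded: evict oldest-first once it outgrows the
        # handler table by 10% (dict order is insertion order on 3.7+).
        while len(cache) > len(handlers) * 1.1:
            cache.pop(next(iter(cache)))
    return func(token)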
- self.tree.insertComment(token, self.tree.openElements[-1]) - - def processDoctype(self, token): - self.parser.parseError("unexpected-doctype") - - def processCharacters(self, token): - self.tree.insertText(token["data"]) - - def processSpaceCharacters(self, token): - self.tree.insertText(token["data"]) - - def processStartTag(self, token): - # Note the caching is done here rather than BoundMethodDispatcher as doing it there - # requires a circular reference to the Phase, and this ends up with a significant - # (CPython 2.7, 3.8) GC cost when parsing many short inputs - name = token["name"] - # In Py2, using `in` is quicker in general than try/except KeyError - # In Py3, `in` is quicker when there are few cache hits (typically short inputs) - if name in self.__startTagCache: - func = self.__startTagCache[name] - else: - func = self.__startTagCache[name] = self.startTagHandler[name] - # bound the cache size in case we get loads of unknown tags - while len(self.__startTagCache) > len(self.startTagHandler) * 1.1: - # this makes the eviction policy random on Py < 3.7 and FIFO >= 3.7 - self.__startTagCache.pop(next(iter(self.__startTagCache))) - return func(token) - - def startTagHtml(self, token): - if not self.parser.firstStartTag and token["name"] == "html": - self.parser.parseError("non-html-root") - # XXX Need a check here to see if the first start tag token emitted is - # this token... If it's not, invoke self.parser.parseError(). - for attr, value in token["data"].items(): - if attr not in self.tree.openElements[0].attributes: - self.tree.openElements[0].attributes[attr] = value - self.parser.firstStartTag = False - - def processEndTag(self, token): - # Note the caching is done here rather than BoundMethodDispatcher as doing it there - # requires a circular reference to the Phase, and this ends up with a significant - # (CPython 2.7, 3.8) GC cost when parsing many short inputs - name = token["name"] - # In Py2, using `in` is quicker in general than try/except KeyError - # In Py3, `in` is quicker when there are few cache hits (typically short inputs) - if name in self.__endTagCache: - func = self.__endTagCache[name] - else: - func = self.__endTagCache[name] = self.endTagHandler[name] - # bound the cache size in case we get loads of unknown tags - while len(self.__endTagCache) > len(self.endTagHandler) * 1.1: - # this makes the eviction policy random on Py < 3.7 and FIFO >= 3.7 - self.__endTagCache.pop(next(iter(self.__endTagCache))) - return func(token) - - class InitialPhase(Phase): - __slots__ = tuple() - - def processSpaceCharacters(self, token): - pass - - def processComment(self, token): - self.tree.insertComment(token, self.tree.document) - - def processDoctype(self, token): - name = token["name"] - publicId = token["publicId"] - systemId = token["systemId"] - correct = token["correct"] - - if (name != "html" or publicId is not None or - systemId is not None and systemId != "about:legacy-compat"): - self.parser.parseError("unknown-doctype") - - if publicId is None: - publicId = "" - - self.tree.insertDoctype(token) - - if publicId != "": - publicId = publicId.translate(asciiUpper2Lower) - - if (not correct or token["name"] != "html" or - publicId.startswith( - ("+//silmaril//dtd html pro v0r11 19970101//", - "-//advasoft ltd//dtd html 3.0 aswedit + extensions//", - "-//as//dtd html 3.0 aswedit + extensions//", - "-//ietf//dtd html 2.0 level 1//", - "-//ietf//dtd html 2.0 level 2//", - "-//ietf//dtd html 2.0 strict level 1//", - "-//ietf//dtd html 2.0 strict level 2//", - 
"-//ietf//dtd html 2.0 strict//", - "-//ietf//dtd html 2.0//", - "-//ietf//dtd html 2.1e//", - "-//ietf//dtd html 3.0//", - "-//ietf//dtd html 3.2 final//", - "-//ietf//dtd html 3.2//", - "-//ietf//dtd html 3//", - "-//ietf//dtd html level 0//", - "-//ietf//dtd html level 1//", - "-//ietf//dtd html level 2//", - "-//ietf//dtd html level 3//", - "-//ietf//dtd html strict level 0//", - "-//ietf//dtd html strict level 1//", - "-//ietf//dtd html strict level 2//", - "-//ietf//dtd html strict level 3//", - "-//ietf//dtd html strict//", - "-//ietf//dtd html//", - "-//metrius//dtd metrius presentational//", - "-//microsoft//dtd internet explorer 2.0 html strict//", - "-//microsoft//dtd internet explorer 2.0 html//", - "-//microsoft//dtd internet explorer 2.0 tables//", - "-//microsoft//dtd internet explorer 3.0 html strict//", - "-//microsoft//dtd internet explorer 3.0 html//", - "-//microsoft//dtd internet explorer 3.0 tables//", - "-//netscape comm. corp.//dtd html//", - "-//netscape comm. corp.//dtd strict html//", - "-//o'reilly and associates//dtd html 2.0//", - "-//o'reilly and associates//dtd html extended 1.0//", - "-//o'reilly and associates//dtd html extended relaxed 1.0//", - "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//", - "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//", - "-//spyglass//dtd html 2.0 extended//", - "-//sq//dtd html 2.0 hotmetal + extensions//", - "-//sun microsystems corp.//dtd hotjava html//", - "-//sun microsystems corp.//dtd hotjava strict html//", - "-//w3c//dtd html 3 1995-03-24//", - "-//w3c//dtd html 3.2 draft//", - "-//w3c//dtd html 3.2 final//", - "-//w3c//dtd html 3.2//", - "-//w3c//dtd html 3.2s draft//", - "-//w3c//dtd html 4.0 frameset//", - "-//w3c//dtd html 4.0 transitional//", - "-//w3c//dtd html experimental 19960712//", - "-//w3c//dtd html experimental 970421//", - "-//w3c//dtd w3 html//", - "-//w3o//dtd w3 html 3.0//", - "-//webtechs//dtd mozilla html 2.0//", - "-//webtechs//dtd mozilla html//")) or - publicId in ("-//w3o//dtd w3 html strict 3.0//en//", - "-/w3c/dtd html 4.0 transitional/en", - "html") or - publicId.startswith( - ("-//w3c//dtd html 4.01 frameset//", - "-//w3c//dtd html 4.01 transitional//")) and - systemId is None or - systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"): - self.parser.compatMode = "quirks" - elif (publicId.startswith( - ("-//w3c//dtd xhtml 1.0 frameset//", - "-//w3c//dtd xhtml 1.0 transitional//")) or - publicId.startswith( - ("-//w3c//dtd html 4.01 frameset//", - "-//w3c//dtd html 4.01 transitional//")) and - systemId is not None): - self.parser.compatMode = "limited quirks" - - self.parser.phase = self.parser.phases["beforeHtml"] - - def anythingElse(self): - self.parser.compatMode = "quirks" - self.parser.phase = self.parser.phases["beforeHtml"] - - def processCharacters(self, token): - self.parser.parseError("expected-doctype-but-got-chars") - self.anythingElse() - return token - - def processStartTag(self, token): - self.parser.parseError("expected-doctype-but-got-start-tag", - {"name": token["name"]}) - self.anythingElse() - return token - - def processEndTag(self, token): - self.parser.parseError("expected-doctype-but-got-end-tag", - {"name": token["name"]}) - self.anythingElse() - return token - - def processEOF(self): - self.parser.parseError("expected-doctype-but-got-eof") - self.anythingElse() - return True - - class BeforeHtmlPhase(Phase): - __slots__ = tuple() - - # helper methods - def 
insertHtmlElement(self): - self.tree.insertRoot(impliedTagToken("html", "StartTag")) - self.parser.phase = self.parser.phases["beforeHead"] - - # other - def processEOF(self): - self.insertHtmlElement() - return True - - def processComment(self, token): - self.tree.insertComment(token, self.tree.document) - - def processSpaceCharacters(self, token): - pass - - def processCharacters(self, token): - self.insertHtmlElement() - return token - - def processStartTag(self, token): - if token["name"] == "html": - self.parser.firstStartTag = True - self.insertHtmlElement() - return token - - def processEndTag(self, token): - if token["name"] not in ("head", "body", "html", "br"): - self.parser.parseError("unexpected-end-tag-before-html", - {"name": token["name"]}) - else: - self.insertHtmlElement() - return token - - class BeforeHeadPhase(Phase): - __slots__ = tuple() - - def processEOF(self): - self.startTagHead(impliedTagToken("head", "StartTag")) - return True - - def processSpaceCharacters(self, token): - pass - - def processCharacters(self, token): - self.startTagHead(impliedTagToken("head", "StartTag")) - return token - - def startTagHtml(self, token): - return self.parser.phases["inBody"].processStartTag(token) - - def startTagHead(self, token): - self.tree.insertElement(token) - self.tree.headPointer = self.tree.openElements[-1] - self.parser.phase = self.parser.phases["inHead"] - - def startTagOther(self, token): - self.startTagHead(impliedTagToken("head", "StartTag")) - return token - - def endTagImplyHead(self, token): - self.startTagHead(impliedTagToken("head", "StartTag")) - return token - - def endTagOther(self, token): - self.parser.parseError("end-tag-after-implied-root", - {"name": token["name"]}) - - startTagHandler = _utils.MethodDispatcher([ - ("html", startTagHtml), - ("head", startTagHead) - ]) - startTagHandler.default = startTagOther - - endTagHandler = _utils.MethodDispatcher([ - (("head", "body", "html", "br"), endTagImplyHead) - ]) - endTagHandler.default = endTagOther - - class InHeadPhase(Phase): - __slots__ = tuple() - - # the real thing - def processEOF(self): - self.anythingElse() - return True - - def processCharacters(self, token): - self.anythingElse() - return token - - def startTagHtml(self, token): - return self.parser.phases["inBody"].processStartTag(token) - - def startTagHead(self, token): - self.parser.parseError("two-heads-are-not-better-than-one") - - def startTagBaseLinkCommand(self, token): - self.tree.insertElement(token) - self.tree.openElements.pop() - token["selfClosingAcknowledged"] = True - - def startTagMeta(self, token): - self.tree.insertElement(token) - self.tree.openElements.pop() - token["selfClosingAcknowledged"] = True - - attributes = token["data"] - if self.parser.tokenizer.stream.charEncoding[1] == "tentative": - if "charset" in attributes: - self.parser.tokenizer.stream.changeEncoding(attributes["charset"]) - elif ("content" in attributes and - "http-equiv" in attributes and - attributes["http-equiv"].lower() == "content-type"): - # Encoding it as UTF-8 here is a hack, as really we should pass - # the abstract Unicode string, and just use the - # ContentAttrParser on that, but using UTF-8 allows all chars - # to be encoded and as an ASCII-superset works.
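startTagMeta above is the late half of the encoding machinery: when the input was bytes and the sniffed encoding is still "tentative", a meta charset attribute (or an http-equiv="Content-Type" declaration) calls changeEncoding on the input stream, which may restart the parse under the new codec. A hedged end-to-end example of the observable behavior:

    import html5lib

    # Bytes input forces encoding sniffing. The meta tag declares
    # windows-1252, so the 0xE9 byte decodes to U+00E9 ("é") instead
    # of being misread under a UTF-8 assumption.
    raw = b'<meta charset="windows-1252"><p>caf\xe9</p>'
    doc = html5lib.parse(raw)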
- data = _inputstream.EncodingBytes(attributes["content"].encode("utf-8")) - parser = _inputstream.ContentAttrParser(data) - codec = parser.parse() - self.parser.tokenizer.stream.changeEncoding(codec) - - def startTagTitle(self, token): - self.parser.parseRCDataRawtext(token, "RCDATA") - - def startTagNoFramesStyle(self, token): - # Need to decide whether to implement the scripting-disabled case - self.parser.parseRCDataRawtext(token, "RAWTEXT") - - def startTagNoscript(self, token): - if self.parser.scripting: - self.parser.parseRCDataRawtext(token, "RAWTEXT") - else: - self.tree.insertElement(token) - self.parser.phase = self.parser.phases["inHeadNoscript"] - - def startTagScript(self, token): - self.tree.insertElement(token) - self.parser.tokenizer.state = self.parser.tokenizer.scriptDataState - self.parser.originalPhase = self.parser.phase - self.parser.phase = self.parser.phases["text"] - - def startTagOther(self, token): - self.anythingElse() - return token - - def endTagHead(self, token): - node = self.parser.tree.openElements.pop() - assert node.name == "head", "Expected head got %s" % node.name - self.parser.phase = self.parser.phases["afterHead"] - - def endTagHtmlBodyBr(self, token): - self.anythingElse() - return token - - def endTagOther(self, token): - self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) - - def anythingElse(self): - self.endTagHead(impliedTagToken("head")) - - startTagHandler = _utils.MethodDispatcher([ - ("html", startTagHtml), - ("title", startTagTitle), - (("noframes", "style"), startTagNoFramesStyle), - ("noscript", startTagNoscript), - ("script", startTagScript), - (("base", "basefont", "bgsound", "command", "link"), - startTagBaseLinkCommand), - ("meta", startTagMeta), - ("head", startTagHead) - ]) - startTagHandler.default = startTagOther - - endTagHandler = _utils.MethodDispatcher([ - ("head", endTagHead), - (("br", "html", "body"), endTagHtmlBodyBr) - ]) - endTagHandler.default = endTagOther - - class InHeadNoscriptPhase(Phase): - __slots__ = tuple() - - def processEOF(self): - self.parser.parseError("eof-in-head-noscript") - self.anythingElse() - return True - - def processComment(self, token): - return self.parser.phases["inHead"].processComment(token) - - def processCharacters(self, token): - self.parser.parseError("char-in-head-noscript") - self.anythingElse() - return token - - def processSpaceCharacters(self, token): - return self.parser.phases["inHead"].processSpaceCharacters(token) - - def startTagHtml(self, token): - return self.parser.phases["inBody"].processStartTag(token) - - def startTagBaseLinkCommand(self, token): - return self.parser.phases["inHead"].processStartTag(token) - - def startTagHeadNoscript(self, token): - self.parser.parseError("unexpected-start-tag", {"name": token["name"]}) - - def startTagOther(self, token): - self.parser.parseError("unexpected-inhead-noscript-tag", {"name": token["name"]}) - self.anythingElse() - return token - - def endTagNoscript(self, token): - node = self.parser.tree.openElements.pop() - assert node.name == "noscript", "Expected noscript got %s" % node.name - self.parser.phase = self.parser.phases["inHead"] - - def endTagBr(self, token): - self.parser.parseError("unexpected-inhead-noscript-tag", {"name": token["name"]}) - self.anythingElse() - return token - - def endTagOther(self, token): - self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) - - def anythingElse(self): - # Caller must raise parse error first! 
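The startTagHandler/endTagHandler tables declared throughout these phases use _utils.MethodDispatcher: a mapping from a tag name (or tuple of names) to a handler method, plus a default fallback for everything else. A rough stand-in built on a plain dict, purely to show the dispatch shape (TinyDispatcher is illustrative, not the vendored implementation):

    class TinyDispatcher(dict):
        """Maps tag name(s) to handlers; unknown names hit `default`."""

        default = None

        def __init__(self, items):
            super().__init__()
            for names, handler in items:
                for name in ([names] if isinstance(names, str) else names):
                    self[name] = handler

        def __missing__(self, name):
            # Unknown tag names fall through to the default handler.
            return self.default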
- self.endTagNoscript(impliedTagToken("noscript")) - - startTagHandler = _utils.MethodDispatcher([ - ("html", startTagHtml), - (("basefont", "bgsound", "link", "meta", "noframes", "style"), startTagBaseLinkCommand), - (("head", "noscript"), startTagHeadNoscript), - ]) - startTagHandler.default = startTagOther - - endTagHandler = _utils.MethodDispatcher([ - ("noscript", endTagNoscript), - ("br", endTagBr), - ]) - endTagHandler.default = endTagOther - - class AfterHeadPhase(Phase): - __slots__ = tuple() - - def processEOF(self): - self.anythingElse() - return True - - def processCharacters(self, token): - self.anythingElse() - return token - - def startTagHtml(self, token): - return self.parser.phases["inBody"].processStartTag(token) - - def startTagBody(self, token): - self.parser.framesetOK = False - self.tree.insertElement(token) - self.parser.phase = self.parser.phases["inBody"] - - def startTagFrameset(self, token): - self.tree.insertElement(token) - self.parser.phase = self.parser.phases["inFrameset"] - - def startTagFromHead(self, token): - self.parser.parseError("unexpected-start-tag-out-of-my-head", - {"name": token["name"]}) - self.tree.openElements.append(self.tree.headPointer) - self.parser.phases["inHead"].processStartTag(token) - for node in self.tree.openElements[::-1]: - if node.name == "head": - self.tree.openElements.remove(node) - break - - def startTagHead(self, token): - self.parser.parseError("unexpected-start-tag", {"name": token["name"]}) - - def startTagOther(self, token): - self.anythingElse() - return token - - def endTagHtmlBodyBr(self, token): - self.anythingElse() - return token - - def endTagOther(self, token): - self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) - - def anythingElse(self): - self.tree.insertElement(impliedTagToken("body", "StartTag")) - self.parser.phase = self.parser.phases["inBody"] - self.parser.framesetOK = True - - startTagHandler = _utils.MethodDispatcher([ - ("html", startTagHtml), - ("body", startTagBody), - ("frameset", startTagFrameset), - (("base", "basefont", "bgsound", "link", "meta", "noframes", "script", - "style", "title"), - startTagFromHead), - ("head", startTagHead) - ]) - startTagHandler.default = startTagOther - endTagHandler = _utils.MethodDispatcher([(("body", "html", "br"), - endTagHtmlBodyBr)]) - endTagHandler.default = endTagOther - - class InBodyPhase(Phase): - # http://www.whatwg.org/specs/web-apps/current-work/#parsing-main-inbody - # the really-really-really-very crazy mode - __slots__ = ("processSpaceCharacters",) - - def __init__(self, *args, **kwargs): - super(InBodyPhase, self).__init__(*args, **kwargs) - # Set this to the default handler - self.processSpaceCharacters = self.processSpaceCharactersNonPre - - def isMatchingFormattingElement(self, node1, node2): - return (node1.name == node2.name and - node1.namespace == node2.namespace and - node1.attributes == node2.attributes) - - # helper - def addFormattingElement(self, token): - self.tree.insertElement(token) - element = self.tree.openElements[-1] - - matchingElements = [] - for node in self.tree.activeFormattingElements[::-1]: - if node is Marker: - break - elif self.isMatchingFormattingElement(node, element): - matchingElements.append(node) - - assert len(matchingElements) <= 3 - if len(matchingElements) == 3: - self.tree.activeFormattingElements.remove(matchingElements[-1]) - self.tree.activeFormattingElements.append(element) - - # the real deal - def processEOF(self): - allowed_elements = frozenset(("dd", "dt", "li", "p", "tbody", 
"td", - "tfoot", "th", "thead", "tr", "body", - "html")) - for node in self.tree.openElements[::-1]: - if node.name not in allowed_elements: - self.parser.parseError("expected-closing-tag-but-got-eof") - break - # Stop parsing - - def processSpaceCharactersDropNewline(self, token): - # Sometimes (start of <pre>, <listing>, and <textarea> blocks) we - # want to drop leading newlines - data = token["data"] - self.processSpaceCharacters = self.processSpaceCharactersNonPre - if (data.startswith("\n") and - self.tree.openElements[-1].name in ("pre", "listing", "textarea") and - not self.tree.openElements[-1].hasContent()): - data = data[1:] - if data: - self.tree.reconstructActiveFormattingElements() - self.tree.insertText(data) - - def processCharacters(self, token): - if token["data"] == "\u0000": - # The tokenizer should always emit null on its own - return - self.tree.reconstructActiveFormattingElements() - self.tree.insertText(token["data"]) - # This must be bad for performance - if (self.parser.framesetOK and - any([char not in spaceCharacters - for char in token["data"]])): - self.parser.framesetOK = False - - def processSpaceCharactersNonPre(self, token): - self.tree.reconstructActiveFormattingElements() - self.tree.insertText(token["data"]) - - def startTagProcessInHead(self, token): - return self.parser.phases["inHead"].processStartTag(token) - - def startTagBody(self, token): - self.parser.parseError("unexpected-start-tag", {"name": "body"}) - if (len(self.tree.openElements) == 1 or - self.tree.openElements[1].name != "body"): - assert self.parser.innerHTML - else: - self.parser.framesetOK = False - for attr, value in token["data"].items(): - if attr not in self.tree.openElements[1].attributes: - self.tree.openElements[1].attributes[attr] = value - - def startTagFrameset(self, token): - self.parser.parseError("unexpected-start-tag", {"name": "frameset"}) - if (len(self.tree.openElements) == 1 or self.tree.openElements[1].name != "body"): - assert self.parser.innerHTML - elif not self.parser.framesetOK: - pass - else: - if self.tree.openElements[1].parent: - self.tree.openElements[1].parent.removeChild(self.tree.openElements[1]) - while self.tree.openElements[-1].name != "html": - self.tree.openElements.pop() - self.tree.insertElement(token) - self.parser.phase = self.parser.phases["inFrameset"] - - def startTagCloseP(self, token): - if self.tree.elementInScope("p", variant="button"): - self.endTagP(impliedTagToken("p")) - self.tree.insertElement(token) - - def startTagPreListing(self, token): - if self.tree.elementInScope("p", variant="button"): - self.endTagP(impliedTagToken("p")) - self.tree.insertElement(token) - self.parser.framesetOK = False - self.processSpaceCharacters = self.processSpaceCharactersDropNewline - - def startTagForm(self, token): - if self.tree.formPointer: - self.parser.parseError("unexpected-start-tag", {"name": "form"}) - else: - if self.tree.elementInScope("p", variant="button"): - self.endTagP(impliedTagToken("p")) - self.tree.insertElement(token) - self.tree.formPointer = self.tree.openElements[-1] - - def startTagListItem(self, token): - self.parser.framesetOK = False - - stopNamesMap = {"li": ["li"], - "dt": ["dt", "dd"], - "dd": ["dt", "dd"]} - stopNames = stopNamesMap[token["name"]] - for node in reversed(self.tree.openElements): - if node.name in stopNames: - self.parser.phase.processEndTag( - impliedTagToken(node.name, "EndTag")) - break - if (node.nameTuple in specialElements and - node.name not in ("address", "div", "p")): - break - - if 
self.tree.elementInScope("p", variant="button"): - self.parser.phase.processEndTag( - impliedTagToken("p", "EndTag")) - - self.tree.insertElement(token) - - def startTagPlaintext(self, token): - if self.tree.elementInScope("p", variant="button"): - self.endTagP(impliedTagToken("p")) - self.tree.insertElement(token) - self.parser.tokenizer.state = self.parser.tokenizer.plaintextState - - def startTagHeading(self, token): - if self.tree.elementInScope("p", variant="button"): - self.endTagP(impliedTagToken("p")) - if self.tree.openElements[-1].name in headingElements: - self.parser.parseError("unexpected-start-tag", {"name": token["name"]}) - self.tree.openElements.pop() - self.tree.insertElement(token) - - def startTagA(self, token): - afeAElement = self.tree.elementInActiveFormattingElements("a") - if afeAElement: - self.parser.parseError("unexpected-start-tag-implies-end-tag", - {"startName": "a", "endName": "a"}) - self.endTagFormatting(impliedTagToken("a")) - if afeAElement in self.tree.openElements: - self.tree.openElements.remove(afeAElement) - if afeAElement in self.tree.activeFormattingElements: - self.tree.activeFormattingElements.remove(afeAElement) - self.tree.reconstructActiveFormattingElements() - self.addFormattingElement(token) - - def startTagFormatting(self, token): - self.tree.reconstructActiveFormattingElements() - self.addFormattingElement(token) - - def startTagNobr(self, token): - self.tree.reconstructActiveFormattingElements() - if self.tree.elementInScope("nobr"): - self.parser.parseError("unexpected-start-tag-implies-end-tag", - {"startName": "nobr", "endName": "nobr"}) - self.processEndTag(impliedTagToken("nobr")) - # XXX Need tests that trigger the following - self.tree.reconstructActiveFormattingElements() - self.addFormattingElement(token) - - def startTagButton(self, token): - if self.tree.elementInScope("button"): - self.parser.parseError("unexpected-start-tag-implies-end-tag", - {"startName": "button", "endName": "button"}) - self.processEndTag(impliedTagToken("button")) - return token - else: - self.tree.reconstructActiveFormattingElements() - self.tree.insertElement(token) - self.parser.framesetOK = False - - def startTagAppletMarqueeObject(self, token): - self.tree.reconstructActiveFormattingElements() - self.tree.insertElement(token) - self.tree.activeFormattingElements.append(Marker) - self.parser.framesetOK = False - - def startTagXmp(self, token): - if self.tree.elementInScope("p", variant="button"): - self.endTagP(impliedTagToken("p")) - self.tree.reconstructActiveFormattingElements() - self.parser.framesetOK = False - self.parser.parseRCDataRawtext(token, "RAWTEXT") - - def startTagTable(self, token): - if self.parser.compatMode != "quirks": - if self.tree.elementInScope("p", variant="button"): - self.processEndTag(impliedTagToken("p")) - self.tree.insertElement(token) - self.parser.framesetOK = False - self.parser.phase = self.parser.phases["inTable"] - - def startTagVoidFormatting(self, token): - self.tree.reconstructActiveFormattingElements() - self.tree.insertElement(token) - self.tree.openElements.pop() - token["selfClosingAcknowledged"] = True - self.parser.framesetOK = False - - def startTagInput(self, token): - framesetOK = self.parser.framesetOK - self.startTagVoidFormatting(token) - if ("type" in token["data"] and - token["data"]["type"].translate(asciiUpper2Lower) == "hidden"): - # input type=hidden doesn't change framesetOK - self.parser.framesetOK = framesetOK - - def startTagParamSource(self, token): - self.tree.insertElement(token) - 
self.tree.openElements.pop() - token["selfClosingAcknowledged"] = True - - def startTagHr(self, token): - if self.tree.elementInScope("p", variant="button"): - self.endTagP(impliedTagToken("p")) - self.tree.insertElement(token) - self.tree.openElements.pop() - token["selfClosingAcknowledged"] = True - self.parser.framesetOK = False - - def startTagImage(self, token): - # No really... - self.parser.parseError("unexpected-start-tag-treated-as", - {"originalName": "image", "newName": "img"}) - self.processStartTag(impliedTagToken("img", "StartTag", - attributes=token["data"], - selfClosing=token["selfClosing"])) - - def startTagIsIndex(self, token): - self.parser.parseError("deprecated-tag", {"name": "isindex"}) - if self.tree.formPointer: - return - form_attrs = {} - if "action" in token["data"]: - form_attrs["action"] = token["data"]["action"] - self.processStartTag(impliedTagToken("form", "StartTag", - attributes=form_attrs)) - self.processStartTag(impliedTagToken("hr", "StartTag")) - self.processStartTag(impliedTagToken("label", "StartTag")) - # XXX Localization ... - if "prompt" in token["data"]: - prompt = token["data"]["prompt"] - else: - prompt = "This is a searchable index. Enter search keywords: " - self.processCharacters( - {"type": tokenTypes["Characters"], "data": prompt}) - attributes = token["data"].copy() - if "action" in attributes: - del attributes["action"] - if "prompt" in attributes: - del attributes["prompt"] - attributes["name"] = "isindex" - self.processStartTag(impliedTagToken("input", "StartTag", - attributes=attributes, - selfClosing=token["selfClosing"])) - self.processEndTag(impliedTagToken("label")) - self.processStartTag(impliedTagToken("hr", "StartTag")) - self.processEndTag(impliedTagToken("form")) - - def startTagTextarea(self, token): - self.tree.insertElement(token) - self.parser.tokenizer.state = self.parser.tokenizer.rcdataState - self.processSpaceCharacters = self.processSpaceCharactersDropNewline - self.parser.framesetOK = False - - def startTagIFrame(self, token): - self.parser.framesetOK = False - self.startTagRawtext(token) - - def startTagNoscript(self, token): - if self.parser.scripting: - self.startTagRawtext(token) - else: - self.startTagOther(token) - - def startTagRawtext(self, token): - """iframe, noembed, noframes, noscript (if scripting enabled)""" - self.parser.parseRCDataRawtext(token, "RAWTEXT") - - def startTagOpt(self, token): - if self.tree.openElements[-1].name == "option": - self.parser.phase.processEndTag(impliedTagToken("option")) - self.tree.reconstructActiveFormattingElements() - self.parser.tree.insertElement(token) - - def startTagSelect(self, token): - self.tree.reconstructActiveFormattingElements() - self.tree.insertElement(token) - self.parser.framesetOK = False - if self.parser.phase in (self.parser.phases["inTable"], - self.parser.phases["inCaption"], - self.parser.phases["inColumnGroup"], - self.parser.phases["inTableBody"], - self.parser.phases["inRow"], - self.parser.phases["inCell"]): - self.parser.phase = self.parser.phases["inSelectInTable"] - else: - self.parser.phase = self.parser.phases["inSelect"] - - def startTagRpRt(self, token): - if self.tree.elementInScope("ruby"): - self.tree.generateImpliedEndTags() - if self.tree.openElements[-1].name != "ruby": - self.parser.parseError() - self.tree.insertElement(token) - - def startTagMath(self, token): - self.tree.reconstructActiveFormattingElements() - self.parser.adjustMathMLAttributes(token) - self.parser.adjustForeignAttributes(token) - token["namespace"] =
namespaces["mathml"] - self.tree.insertElement(token) - # Need to get the parse error right for the case where the token - # has a namespace not equal to the xmlns attribute - if token["selfClosing"]: - self.tree.openElements.pop() - token["selfClosingAcknowledged"] = True - - def startTagSvg(self, token): - self.tree.reconstructActiveFormattingElements() - self.parser.adjustSVGAttributes(token) - self.parser.adjustForeignAttributes(token) - token["namespace"] = namespaces["svg"] - self.tree.insertElement(token) - # Need to get the parse error right for the case where the token - # has a namespace not equal to the xmlns attribute - if token["selfClosing"]: - self.tree.openElements.pop() - token["selfClosingAcknowledged"] = True - - def startTagMisplaced(self, token): - """ Elements that should be children of other elements that have a - different insertion mode; here they are ignored - "caption", "col", "colgroup", "frame", "frameset", "head", - "option", "optgroup", "tbody", "td", "tfoot", "th", "thead", - "tr", "noscript" - """ - self.parser.parseError("unexpected-start-tag-ignored", {"name": token["name"]}) - - def startTagOther(self, token): - self.tree.reconstructActiveFormattingElements() - self.tree.insertElement(token) - - def endTagP(self, token): - if not self.tree.elementInScope("p", variant="button"): - self.startTagCloseP(impliedTagToken("p", "StartTag")) - self.parser.parseError("unexpected-end-tag", {"name": "p"}) - self.endTagP(impliedTagToken("p", "EndTag")) - else: - self.tree.generateImpliedEndTags("p") - if self.tree.openElements[-1].name != "p": - self.parser.parseError("unexpected-end-tag", {"name": "p"}) - node = self.tree.openElements.pop() - while node.name != "p": - node = self.tree.openElements.pop() - - def endTagBody(self, token): - if not self.tree.elementInScope("body"): - self.parser.parseError() - return - elif self.tree.openElements[-1].name != "body": - for node in self.tree.openElements[2:]: - if node.name not in frozenset(("dd", "dt", "li", "optgroup", - "option", "p", "rp", "rt", - "tbody", "td", "tfoot", - "th", "thead", "tr", "body", - "html")): - # Not sure this is the correct name for the parse error - self.parser.parseError( - "expected-one-end-tag-but-got-another", - {"gotName": "body", "expectedName": node.name}) - break - self.parser.phase = self.parser.phases["afterBody"] - - def endTagHtml(self, token): - # We repeat the test for the body end tag token being ignored here - if self.tree.elementInScope("body"): - self.endTagBody(impliedTagToken("body")) - return token - - def endTagBlock(self, token): - # Put us back in the right whitespace handling mode - if token["name"] == "pre": - self.processSpaceCharacters = self.processSpaceCharactersNonPre - inScope = self.tree.elementInScope(token["name"]) - if inScope: - self.tree.generateImpliedEndTags() - if self.tree.openElements[-1].name != token["name"]: - self.parser.parseError("end-tag-too-early", {"name": token["name"]}) - if inScope: - node = self.tree.openElements.pop() - while node.name != token["name"]: - node = self.tree.openElements.pop() - - def endTagForm(self, token): - node = self.tree.formPointer - self.tree.formPointer = None - if node is None or not self.tree.elementInScope(node): - self.parser.parseError("unexpected-end-tag", - {"name": "form"}) - else: - self.tree.generateImpliedEndTags() - if self.tree.openElements[-1] != node: - self.parser.parseError("end-tag-too-early-ignored", - {"name": "form"}) - self.tree.openElements.remove(node) - - def endTagListItem(self, token): - 
if token["name"] == "li": - variant = "list" - else: - variant = None - if not self.tree.elementInScope(token["name"], variant=variant): - self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) - else: - self.tree.generateImpliedEndTags(exclude=token["name"]) - if self.tree.openElements[-1].name != token["name"]: - self.parser.parseError( - "end-tag-too-early", - {"name": token["name"]}) - node = self.tree.openElements.pop() - while node.name != token["name"]: - node = self.tree.openElements.pop() - - def endTagHeading(self, token): - for item in headingElements: - if self.tree.elementInScope(item): - self.tree.generateImpliedEndTags() - break - if self.tree.openElements[-1].name != token["name"]: - self.parser.parseError("end-tag-too-early", {"name": token["name"]}) - - for item in headingElements: - if self.tree.elementInScope(item): - item = self.tree.openElements.pop() - while item.name not in headingElements: - item = self.tree.openElements.pop() - break - - def endTagFormatting(self, token): - """The much-feared adoption agency algorithm""" - # http://svn.whatwg.org/webapps/complete.html#adoptionAgency revision 7867 - # XXX Better parseError messages appreciated. - - # Step 1 - outerLoopCounter = 0 - - # Step 2 - while outerLoopCounter < 8: - - # Step 3 - outerLoopCounter += 1 - - # Step 4: - - # Let the formatting element be the last element in - # the list of active formatting elements that: - # - is between the end of the list and the last scope - # marker in the list, if any, or the start of the list - # otherwise, and - # - has the same tag name as the token. - formattingElement = self.tree.elementInActiveFormattingElements( - token["name"]) - if (not formattingElement or - (formattingElement in self.tree.openElements and - not self.tree.elementInScope(formattingElement.name))): - # If there is no such node, then abort these steps - # and instead act as described in the "any other - # end tag" entry below. - self.endTagOther(token) - return - - # Otherwise, if there is such a node, but that node is - # not in the stack of open elements, then this is a - # parse error; remove the element from the list, and - # abort these steps. - elif formattingElement not in self.tree.openElements: - self.parser.parseError("adoption-agency-1.2", {"name": token["name"]}) - self.tree.activeFormattingElements.remove(formattingElement) - return - - # Otherwise, if there is such a node, and that node is - # also in the stack of open elements, but the element - # is not in scope, then this is a parse error; ignore - # the token, and abort these steps. - elif not self.tree.elementInScope(formattingElement.name): - self.parser.parseError("adoption-agency-4.4", {"name": token["name"]}) - return - - # Otherwise, there is a formatting element and that - # element is in the stack and is in scope. If the - # element is not the current node, this is a parse - # error. In any case, proceed with the algorithm as - # written in the following steps. - else: - if formattingElement != self.tree.openElements[-1]: - self.parser.parseError("adoption-agency-1.3", {"name": token["name"]}) - - # Step 5: - - # Let the furthest block be the topmost node in the - # stack of open elements that is lower in the stack - # than the formatting element, and is an element in - # the special category. There might not be one. 
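Steps 5 and onward (continued below) locate the "furthest block" and re-home mis-nested nodes around it. The net effect of the adoption agency algorithm is easiest to see from the outside; a hedged sketch of the classic mis-nesting case:

    import html5lib
    from xml.etree import ElementTree

    # </b> arrives while <p> is still open, so the <b> element is
    # "adopted": a clone of it is re-created inside the paragraph.
    tree = html5lib.parse("<b>1<p>2</b>3", namespaceHTMLElements=False)
    print(ElementTree.tostring(tree, encoding="unicode"))
    # Expected shape, roughly:
    # <html><head /><body><b>1</b><p><b>2</b>3</p></body></html>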
- afeIndex = self.tree.openElements.index(formattingElement) - furthestBlock = None - for element in self.tree.openElements[afeIndex:]: - if element.nameTuple in specialElements: - furthestBlock = element - break - - # Step 6: - - # If there is no furthest block, then the UA must - # first pop all the nodes from the bottom of the stack - # of open elements, from the current node up to and - # including the formatting element, then remove the - # formatting element from the list of active - # formatting elements, and finally abort these steps. - if furthestBlock is None: - element = self.tree.openElements.pop() - while element != formattingElement: - element = self.tree.openElements.pop() - self.tree.activeFormattingElements.remove(element) - return - - # Step 7 - commonAncestor = self.tree.openElements[afeIndex - 1] - - # Step 8: - # The bookmark is supposed to help us identify where to reinsert - # nodes in step 15. We have to ensure that we reinsert nodes after - # the node before the active formatting element. Note the bookmark - # can move in step 9.7 - bookmark = self.tree.activeFormattingElements.index(formattingElement) - - # Step 9 - lastNode = node = furthestBlock - innerLoopCounter = 0 - - index = self.tree.openElements.index(node) - while innerLoopCounter < 3: - innerLoopCounter += 1 - # Node is element before node in open elements - index -= 1 - node = self.tree.openElements[index] - if node not in self.tree.activeFormattingElements: - self.tree.openElements.remove(node) - continue - # Step 9.6 - if node == formattingElement: - break - # Step 9.7 - if lastNode == furthestBlock: - bookmark = self.tree.activeFormattingElements.index(node) + 1 - # Step 9.8 - clone = node.cloneNode() - # Replace node with clone - self.tree.activeFormattingElements[ - self.tree.activeFormattingElements.index(node)] = clone - self.tree.openElements[ - self.tree.openElements.index(node)] = clone - node = clone - # Step 9.9 - # Remove lastNode from its parents, if any - if lastNode.parent: - lastNode.parent.removeChild(lastNode) - node.appendChild(lastNode) - # Step 9.10 - lastNode = node - - # Step 10 - # Foster parent lastNode if commonAncestor is a - # table, tbody, tfoot, thead, or tr we need to foster - # parent the lastNode - if lastNode.parent: - lastNode.parent.removeChild(lastNode) - - if commonAncestor.name in frozenset(("table", "tbody", "tfoot", "thead", "tr")): - parent, insertBefore = self.tree.getTableMisnestedNodePosition() - parent.insertBefore(lastNode, insertBefore) - else: - commonAncestor.appendChild(lastNode) - - # Step 11 - clone = formattingElement.cloneNode() - - # Step 12 - furthestBlock.reparentChildren(clone) - - # Step 13 - furthestBlock.appendChild(clone) - - # Step 14 - self.tree.activeFormattingElements.remove(formattingElement) - self.tree.activeFormattingElements.insert(bookmark, clone) - - # Step 15 - self.tree.openElements.remove(formattingElement) - self.tree.openElements.insert( - self.tree.openElements.index(furthestBlock) + 1, clone) - - def endTagAppletMarqueeObject(self, token): - if self.tree.elementInScope(token["name"]): - self.tree.generateImpliedEndTags() - if self.tree.openElements[-1].name != token["name"]: - self.parser.parseError("end-tag-too-early", {"name": token["name"]}) - - if self.tree.elementInScope(token["name"]): - element = self.tree.openElements.pop() - while element.name != token["name"]: - element = self.tree.openElements.pop() - self.tree.clearActiveFormattingElements() - - def endTagBr(self, token): - 
self.parser.parseError("unexpected-end-tag-treated-as", - {"originalName": "br", "newName": "br element"}) - self.tree.reconstructActiveFormattingElements() - self.tree.insertElement(impliedTagToken("br", "StartTag")) - self.tree.openElements.pop() - - def endTagOther(self, token): - for node in self.tree.openElements[::-1]: - if node.name == token["name"]: - self.tree.generateImpliedEndTags(exclude=token["name"]) - if self.tree.openElements[-1].name != token["name"]: - self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) - while self.tree.openElements.pop() != node: - pass - break - else: - if node.nameTuple in specialElements: - self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) - break - - startTagHandler = _utils.MethodDispatcher([ - ("html", Phase.startTagHtml), - (("base", "basefont", "bgsound", "command", "link", "meta", - "script", "style", "title"), - startTagProcessInHead), - ("body", startTagBody), - ("frameset", startTagFrameset), - (("address", "article", "aside", "blockquote", "center", "details", - "dir", "div", "dl", "fieldset", "figcaption", "figure", - "footer", "header", "hgroup", "main", "menu", "nav", "ol", "p", - "section", "summary", "ul"), - startTagCloseP), - (headingElements, startTagHeading), - (("pre", "listing"), startTagPreListing), - ("form", startTagForm), - (("li", "dd", "dt"), startTagListItem), - ("plaintext", startTagPlaintext), - ("a", startTagA), - (("b", "big", "code", "em", "font", "i", "s", "small", "strike", - "strong", "tt", "u"), startTagFormatting), - ("nobr", startTagNobr), - ("button", startTagButton), - (("applet", "marquee", "object"), startTagAppletMarqueeObject), - ("xmp", startTagXmp), - ("table", startTagTable), - (("area", "br", "embed", "img", "keygen", "wbr"), - startTagVoidFormatting), - (("param", "source", "track"), startTagParamSource), - ("input", startTagInput), - ("hr", startTagHr), - ("image", startTagImage), - ("isindex", startTagIsIndex), - ("textarea", startTagTextarea), - ("iframe", startTagIFrame), - ("noscript", startTagNoscript), - (("noembed", "noframes"), startTagRawtext), - ("select", startTagSelect), - (("rp", "rt"), startTagRpRt), - (("option", "optgroup"), startTagOpt), - (("math"), startTagMath), - (("svg"), startTagSvg), - (("caption", "col", "colgroup", "frame", "head", - "tbody", "td", "tfoot", "th", "thead", - "tr"), startTagMisplaced) - ]) - startTagHandler.default = startTagOther - - endTagHandler = _utils.MethodDispatcher([ - ("body", endTagBody), - ("html", endTagHtml), - (("address", "article", "aside", "blockquote", "button", "center", - "details", "dialog", "dir", "div", "dl", "fieldset", "figcaption", "figure", - "footer", "header", "hgroup", "listing", "main", "menu", "nav", "ol", "pre", - "section", "summary", "ul"), endTagBlock), - ("form", endTagForm), - ("p", endTagP), - (("dd", "dt", "li"), endTagListItem), - (headingElements, endTagHeading), - (("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small", - "strike", "strong", "tt", "u"), endTagFormatting), - (("applet", "marquee", "object"), endTagAppletMarqueeObject), - ("br", endTagBr), - ]) - endTagHandler.default = endTagOther - - class TextPhase(Phase): - __slots__ = tuple() - - def processCharacters(self, token): - self.tree.insertText(token["data"]) - - def processEOF(self): - self.parser.parseError("expected-named-closing-tag-but-got-eof", - {"name": self.tree.openElements[-1].name}) - self.tree.openElements.pop() - self.parser.phase = self.parser.originalPhase - return True - - def 
startTagOther(self, token): - assert False, "Tried to process start tag %s in RCDATA/RAWTEXT mode" % token['name'] - - def endTagScript(self, token): - node = self.tree.openElements.pop() - assert node.name == "script" - self.parser.phase = self.parser.originalPhase - # The rest of this method is all stuff that only happens if - # document.write works - - def endTagOther(self, token): - self.tree.openElements.pop() - self.parser.phase = self.parser.originalPhase - - startTagHandler = _utils.MethodDispatcher([]) - startTagHandler.default = startTagOther - endTagHandler = _utils.MethodDispatcher([ - ("script", endTagScript)]) - endTagHandler.default = endTagOther - - class InTablePhase(Phase): - # http://www.whatwg.org/specs/web-apps/current-work/#in-table - __slots__ = tuple() - - # helper methods - def clearStackToTableContext(self): - # "clear the stack back to a table context" - while self.tree.openElements[-1].name not in ("table", "html"): - # self.parser.parseError("unexpected-implied-end-tag-in-table", - # {"name": self.tree.openElements[-1].name}) - self.tree.openElements.pop() - # When the current node is <html> it's an innerHTML case - - # processing methods - def processEOF(self): - if self.tree.openElements[-1].name != "html": - self.parser.parseError("eof-in-table") - else: - assert self.parser.innerHTML - # Stop parsing - - def processSpaceCharacters(self, token): - originalPhase = self.parser.phase - self.parser.phase = self.parser.phases["inTableText"] - self.parser.phase.originalPhase = originalPhase - self.parser.phase.processSpaceCharacters(token) - - def processCharacters(self, token): - originalPhase = self.parser.phase - self.parser.phase = self.parser.phases["inTableText"] - self.parser.phase.originalPhase = originalPhase - self.parser.phase.processCharacters(token) - - def insertText(self, token): - # If we get here there must be at least one non-whitespace character - # Do the table magic! 
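The "table magic" flagged in these comments is foster parenting: content that may not sit directly inside a table is handed to the inBody phase with insertFromTable set, and the tree builder re-parents it immediately before the table. A hedged demonstration of the visible result:

    import html5lib
    from xml.etree import ElementTree

    # "oops" is not a legal direct child of <table>, so it is
    # foster-parented in front of the table element.
    tree = html5lib.parse("<table>oops<tr><td>x</td></tr></table>",
                          namespaceHTMLElements=False)
    print(ElementTree.tostring(tree, encoding="unicode"))
    # Roughly: <body>oops<table><tbody><tr>...</tr></tbody></table>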
- self.tree.insertFromTable = True - self.parser.phases["inBody"].processCharacters(token) - self.tree.insertFromTable = False - - def startTagCaption(self, token): - self.clearStackToTableContext() - self.tree.activeFormattingElements.append(Marker) - self.tree.insertElement(token) - self.parser.phase = self.parser.phases["inCaption"] - - def startTagColgroup(self, token): - self.clearStackToTableContext() - self.tree.insertElement(token) - self.parser.phase = self.parser.phases["inColumnGroup"] - - def startTagCol(self, token): - self.startTagColgroup(impliedTagToken("colgroup", "StartTag")) - return token - - def startTagRowGroup(self, token): - self.clearStackToTableContext() - self.tree.insertElement(token) - self.parser.phase = self.parser.phases["inTableBody"] - - def startTagImplyTbody(self, token): - self.startTagRowGroup(impliedTagToken("tbody", "StartTag")) - return token - - def startTagTable(self, token): - self.parser.parseError("unexpected-start-tag-implies-end-tag", - {"startName": "table", "endName": "table"}) - self.parser.phase.processEndTag(impliedTagToken("table")) - if not self.parser.innerHTML: - return token - - def startTagStyleScript(self, token): - return self.parser.phases["inHead"].processStartTag(token) - - def startTagInput(self, token): - if ("type" in token["data"] and - token["data"]["type"].translate(asciiUpper2Lower) == "hidden"): - self.parser.parseError("unexpected-hidden-input-in-table") - self.tree.insertElement(token) - # XXX associate with form - self.tree.openElements.pop() - else: - self.startTagOther(token) - - def startTagForm(self, token): - self.parser.parseError("unexpected-form-in-table") - if self.tree.formPointer is None: - self.tree.insertElement(token) - self.tree.formPointer = self.tree.openElements[-1] - self.tree.openElements.pop() - - def startTagOther(self, token): - self.parser.parseError("unexpected-start-tag-implies-table-voodoo", {"name": token["name"]}) - # Do the table magic! - self.tree.insertFromTable = True - self.parser.phases["inBody"].processStartTag(token) - self.tree.insertFromTable = False - - def endTagTable(self, token): - if self.tree.elementInScope("table", variant="table"): - self.tree.generateImpliedEndTags() - if self.tree.openElements[-1].name != "table": - self.parser.parseError("end-tag-too-early-named", - {"gotName": "table", - "expectedName": self.tree.openElements[-1].name}) - while self.tree.openElements[-1].name != "table": - self.tree.openElements.pop() - self.tree.openElements.pop() - self.parser.resetInsertionMode() - else: - # innerHTML case - assert self.parser.innerHTML - self.parser.parseError() - - def endTagIgnore(self, token): - self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) - - def endTagOther(self, token): - self.parser.parseError("unexpected-end-tag-implies-table-voodoo", {"name": token["name"]}) - # Do the table magic! 
- self.tree.insertFromTable = True - self.parser.phases["inBody"].processEndTag(token) - self.tree.insertFromTable = False - - startTagHandler = _utils.MethodDispatcher([ - ("html", Phase.startTagHtml), - ("caption", startTagCaption), - ("colgroup", startTagColgroup), - ("col", startTagCol), - (("tbody", "tfoot", "thead"), startTagRowGroup), - (("td", "th", "tr"), startTagImplyTbody), - ("table", startTagTable), - (("style", "script"), startTagStyleScript), - ("input", startTagInput), - ("form", startTagForm) - ]) - startTagHandler.default = startTagOther - - endTagHandler = _utils.MethodDispatcher([ - ("table", endTagTable), - (("body", "caption", "col", "colgroup", "html", "tbody", "td", - "tfoot", "th", "thead", "tr"), endTagIgnore) - ]) - endTagHandler.default = endTagOther - - class InTableTextPhase(Phase): - __slots__ = ("originalPhase", "characterTokens") - - def __init__(self, *args, **kwargs): - super(InTableTextPhase, self).__init__(*args, **kwargs) - self.originalPhase = None - self.characterTokens = [] - - def flushCharacters(self): - data = "".join([item["data"] for item in self.characterTokens]) - if any([item not in spaceCharacters for item in data]): - token = {"type": tokenTypes["Characters"], "data": data} - self.parser.phases["inTable"].insertText(token) - elif data: - self.tree.insertText(data) - self.characterTokens = [] - - def processComment(self, token): - self.flushCharacters() - self.parser.phase = self.originalPhase - return token - - def processEOF(self): - self.flushCharacters() - self.parser.phase = self.originalPhase - return True - - def processCharacters(self, token): - if token["data"] == "\u0000": - return - self.characterTokens.append(token) - - def processSpaceCharacters(self, token): - # pretty sure we should never reach here - self.characterTokens.append(token) - # assert False - - def processStartTag(self, token): - self.flushCharacters() - self.parser.phase = self.originalPhase - return token - - def processEndTag(self, token): - self.flushCharacters() - self.parser.phase = self.originalPhase - return token - - class InCaptionPhase(Phase): - # http://www.whatwg.org/specs/web-apps/current-work/#in-caption - __slots__ = tuple() - - def ignoreEndTagCaption(self): - return not self.tree.elementInScope("caption", variant="table") - - def processEOF(self): - self.parser.phases["inBody"].processEOF() - - def processCharacters(self, token): - return self.parser.phases["inBody"].processCharacters(token) - - def startTagTableElement(self, token): - self.parser.parseError() - # XXX Have to duplicate logic here to find out if the tag is ignored - ignoreEndTag = self.ignoreEndTagCaption() - self.parser.phase.processEndTag(impliedTagToken("caption")) - if not ignoreEndTag: - return token - - def startTagOther(self, token): - return self.parser.phases["inBody"].processStartTag(token) - - def endTagCaption(self, token): - if not self.ignoreEndTagCaption(): - # AT this code is quite similar to endTagTable in "InTable" - self.tree.generateImpliedEndTags() - if self.tree.openElements[-1].name != "caption": - self.parser.parseError("expected-one-end-tag-but-got-another", - {"gotName": "caption", - "expectedName": self.tree.openElements[-1].name}) - while self.tree.openElements[-1].name != "caption": - self.tree.openElements.pop() - self.tree.openElements.pop() - self.tree.clearActiveFormattingElements() - self.parser.phase = self.parser.phases["inTable"] - else: - # innerHTML case - assert self.parser.innerHTML - self.parser.parseError() - - def endTagTable(self, 
token): - self.parser.parseError() - ignoreEndTag = self.ignoreEndTagCaption() - self.parser.phase.processEndTag(impliedTagToken("caption")) - if not ignoreEndTag: - return token - - def endTagIgnore(self, token): - self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) - - def endTagOther(self, token): - return self.parser.phases["inBody"].processEndTag(token) - - startTagHandler = _utils.MethodDispatcher([ - ("html", Phase.startTagHtml), - (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th", - "thead", "tr"), startTagTableElement) - ]) - startTagHandler.default = startTagOther - - endTagHandler = _utils.MethodDispatcher([ - ("caption", endTagCaption), - ("table", endTagTable), - (("body", "col", "colgroup", "html", "tbody", "td", "tfoot", "th", - "thead", "tr"), endTagIgnore) - ]) - endTagHandler.default = endTagOther - - class InColumnGroupPhase(Phase): - # http://www.whatwg.org/specs/web-apps/current-work/#in-column - __slots__ = tuple() - - def ignoreEndTagColgroup(self): - return self.tree.openElements[-1].name == "html" - - def processEOF(self): - if self.tree.openElements[-1].name == "html": - assert self.parser.innerHTML - return - else: - ignoreEndTag = self.ignoreEndTagColgroup() - self.endTagColgroup(impliedTagToken("colgroup")) - if not ignoreEndTag: - return True - - def processCharacters(self, token): - ignoreEndTag = self.ignoreEndTagColgroup() - self.endTagColgroup(impliedTagToken("colgroup")) - if not ignoreEndTag: - return token - - def startTagCol(self, token): - self.tree.insertElement(token) - self.tree.openElements.pop() - token["selfClosingAcknowledged"] = True - - def startTagOther(self, token): - ignoreEndTag = self.ignoreEndTagColgroup() - self.endTagColgroup(impliedTagToken("colgroup")) - if not ignoreEndTag: - return token - - def endTagColgroup(self, token): - if self.ignoreEndTagColgroup(): - # innerHTML case - assert self.parser.innerHTML - self.parser.parseError() - else: - self.tree.openElements.pop() - self.parser.phase = self.parser.phases["inTable"] - - def endTagCol(self, token): - self.parser.parseError("no-end-tag", {"name": "col"}) - - def endTagOther(self, token): - ignoreEndTag = self.ignoreEndTagColgroup() - self.endTagColgroup(impliedTagToken("colgroup")) - if not ignoreEndTag: - return token - - startTagHandler = _utils.MethodDispatcher([ - ("html", Phase.startTagHtml), - ("col", startTagCol) - ]) - startTagHandler.default = startTagOther - - endTagHandler = _utils.MethodDispatcher([ - ("colgroup", endTagColgroup), - ("col", endTagCol) - ]) - endTagHandler.default = endTagOther - - class InTableBodyPhase(Phase): - # http://www.whatwg.org/specs/web-apps/current-work/#in-table0 - __slots__ = tuple() - - # helper methods - def clearStackToTableBodyContext(self): - while self.tree.openElements[-1].name not in ("tbody", "tfoot", - "thead", "html"): - # self.parser.parseError("unexpected-implied-end-tag-in-table", - # {"name": self.tree.openElements[-1].name}) - self.tree.openElements.pop() - if self.tree.openElements[-1].name == "html": - assert self.parser.innerHTML - - # the rest - def processEOF(self): - self.parser.phases["inTable"].processEOF() - - def processSpaceCharacters(self, token): - return self.parser.phases["inTable"].processSpaceCharacters(token) - - def processCharacters(self, token): - return self.parser.phases["inTable"].processCharacters(token) - - def startTagTr(self, token): - self.clearStackToTableBodyContext() - self.tree.insertElement(token) - self.parser.phase = self.parser.phases["inRow"] - - def 
startTagTableCell(self, token): - self.parser.parseError("unexpected-cell-in-table-body", - {"name": token["name"]}) - self.startTagTr(impliedTagToken("tr", "StartTag")) - return token - - def startTagTableOther(self, token): - # XXX AT Any ideas on how to share this with endTagTable? - if (self.tree.elementInScope("tbody", variant="table") or - self.tree.elementInScope("thead", variant="table") or - self.tree.elementInScope("tfoot", variant="table")): - self.clearStackToTableBodyContext() - self.endTagTableRowGroup( - impliedTagToken(self.tree.openElements[-1].name)) - return token - else: - # innerHTML case - assert self.parser.innerHTML - self.parser.parseError() - - def startTagOther(self, token): - return self.parser.phases["inTable"].processStartTag(token) - - def endTagTableRowGroup(self, token): - if self.tree.elementInScope(token["name"], variant="table"): - self.clearStackToTableBodyContext() - self.tree.openElements.pop() - self.parser.phase = self.parser.phases["inTable"] - else: - self.parser.parseError("unexpected-end-tag-in-table-body", - {"name": token["name"]}) - - def endTagTable(self, token): - if (self.tree.elementInScope("tbody", variant="table") or - self.tree.elementInScope("thead", variant="table") or - self.tree.elementInScope("tfoot", variant="table")): - self.clearStackToTableBodyContext() - self.endTagTableRowGroup( - impliedTagToken(self.tree.openElements[-1].name)) - return token - else: - # innerHTML case - assert self.parser.innerHTML - self.parser.parseError() - - def endTagIgnore(self, token): - self.parser.parseError("unexpected-end-tag-in-table-body", - {"name": token["name"]}) - - def endTagOther(self, token): - return self.parser.phases["inTable"].processEndTag(token) - - startTagHandler = _utils.MethodDispatcher([ - ("html", Phase.startTagHtml), - ("tr", startTagTr), - (("td", "th"), startTagTableCell), - (("caption", "col", "colgroup", "tbody", "tfoot", "thead"), - startTagTableOther) - ]) - startTagHandler.default = startTagOther - - endTagHandler = _utils.MethodDispatcher([ - (("tbody", "tfoot", "thead"), endTagTableRowGroup), - ("table", endTagTable), - (("body", "caption", "col", "colgroup", "html", "td", "th", - "tr"), endTagIgnore) - ]) - endTagHandler.default = endTagOther - - class InRowPhase(Phase): - # http://www.whatwg.org/specs/web-apps/current-work/#in-row - __slots__ = tuple() - - # helper methods (XXX unify this with other table helper methods) - def clearStackToTableRowContext(self): - while self.tree.openElements[-1].name not in ("tr", "html"): - self.parser.parseError("unexpected-implied-end-tag-in-table-row", - {"name": self.tree.openElements[-1].name}) - self.tree.openElements.pop() - - def ignoreEndTagTr(self): - return not self.tree.elementInScope("tr", variant="table") - - # the rest - def processEOF(self): - self.parser.phases["inTable"].processEOF() - - def processSpaceCharacters(self, token): - return self.parser.phases["inTable"].processSpaceCharacters(token) - - def processCharacters(self, token): - return self.parser.phases["inTable"].processCharacters(token) - - def startTagTableCell(self, token): - self.clearStackToTableRowContext() - self.tree.insertElement(token) - self.parser.phase = self.parser.phases["inCell"] - self.tree.activeFormattingElements.append(Marker) - - def startTagTableOther(self, token): - ignoreEndTag = self.ignoreEndTagTr() - self.endTagTr(impliedTagToken("tr")) - # XXX how are we sure it's always ignored in the innerHTML case? 
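These row and row-group handlers are why tag soup with unclosed cells still yields a complete table: a new cell, row, or section start tag implies end tags for whatever is open, and a missing tbody is synthesized along the way. A hedged example:

    import html5lib
    from xml.etree import ElementTree

    # No </td>, </tr>, or <tbody> appears in the input; the in-table,
    # in-row, and in-cell phases imply all of them while parsing.
    tree = html5lib.parse("<table><tr><td>a<td>b<tr><td>c",
                          namespaceHTMLElements=False)
    print(ElementTree.tostring(tree, encoding="unicode"))
    # Roughly: ...<tbody><tr><td>a</td><td>b</td></tr><tr><td>c</td></tr></tbody>...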
- if not ignoreEndTag: - return token - - def startTagOther(self, token): - return self.parser.phases["inTable"].processStartTag(token) - - def endTagTr(self, token): - if not self.ignoreEndTagTr(): - self.clearStackToTableRowContext() - self.tree.openElements.pop() - self.parser.phase = self.parser.phases["inTableBody"] - else: - # innerHTML case - assert self.parser.innerHTML - self.parser.parseError() - - def endTagTable(self, token): - ignoreEndTag = self.ignoreEndTagTr() - self.endTagTr(impliedTagToken("tr")) - # Reprocess the current tag if the tr end tag was not ignored - # XXX how are we sure it's always ignored in the innerHTML case? - if not ignoreEndTag: - return token - - def endTagTableRowGroup(self, token): - if self.tree.elementInScope(token["name"], variant="table"): - self.endTagTr(impliedTagToken("tr")) - return token - else: - self.parser.parseError() - - def endTagIgnore(self, token): - self.parser.parseError("unexpected-end-tag-in-table-row", - {"name": token["name"]}) - - def endTagOther(self, token): - return self.parser.phases["inTable"].processEndTag(token) - - startTagHandler = _utils.MethodDispatcher([ - ("html", Phase.startTagHtml), - (("td", "th"), startTagTableCell), - (("caption", "col", "colgroup", "tbody", "tfoot", "thead", - "tr"), startTagTableOther) - ]) - startTagHandler.default = startTagOther - - endTagHandler = _utils.MethodDispatcher([ - ("tr", endTagTr), - ("table", endTagTable), - (("tbody", "tfoot", "thead"), endTagTableRowGroup), - (("body", "caption", "col", "colgroup", "html", "td", "th"), - endTagIgnore) - ]) - endTagHandler.default = endTagOther - - class InCellPhase(Phase): - # http://www.whatwg.org/specs/web-apps/current-work/#in-cell - __slots__ = tuple() - - # helper - def closeCell(self): - if self.tree.elementInScope("td", variant="table"): - self.endTagTableCell(impliedTagToken("td")) - elif self.tree.elementInScope("th", variant="table"): - self.endTagTableCell(impliedTagToken("th")) - - # the rest - def processEOF(self): - self.parser.phases["inBody"].processEOF() - - def processCharacters(self, token): - return self.parser.phases["inBody"].processCharacters(token) - - def startTagTableOther(self, token): - if (self.tree.elementInScope("td", variant="table") or - self.tree.elementInScope("th", variant="table")): - self.closeCell() - return token - else: - # innerHTML case - assert self.parser.innerHTML - self.parser.parseError() - - def startTagOther(self, token): - return self.parser.phases["inBody"].processStartTag(token) - - def endTagTableCell(self, token): - if self.tree.elementInScope(token["name"], variant="table"): - self.tree.generateImpliedEndTags(token["name"]) - if self.tree.openElements[-1].name != token["name"]: - self.parser.parseError("unexpected-cell-end-tag", - {"name": token["name"]}) - while True: - node = self.tree.openElements.pop() - if node.name == token["name"]: - break - else: - self.tree.openElements.pop() - self.tree.clearActiveFormattingElements() - self.parser.phase = self.parser.phases["inRow"] - else: - self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) - - def endTagIgnore(self, token): - self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) - - def endTagImply(self, token): - if self.tree.elementInScope(token["name"], variant="table"): - self.closeCell() - return token - else: - # sometimes innerHTML case - self.parser.parseError() - - def endTagOther(self, token): - return self.parser.phases["inBody"].processEndTag(token) - - startTagHandler = 
_utils.MethodDispatcher([ - ("html", Phase.startTagHtml), - (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th", - "thead", "tr"), startTagTableOther) - ]) - startTagHandler.default = startTagOther - - endTagHandler = _utils.MethodDispatcher([ - (("td", "th"), endTagTableCell), - (("body", "caption", "col", "colgroup", "html"), endTagIgnore), - (("table", "tbody", "tfoot", "thead", "tr"), endTagImply) - ]) - endTagHandler.default = endTagOther - - class InSelectPhase(Phase): - __slots__ = tuple() - - # http://www.whatwg.org/specs/web-apps/current-work/#in-select - def processEOF(self): - if self.tree.openElements[-1].name != "html": - self.parser.parseError("eof-in-select") - else: - assert self.parser.innerHTML - - def processCharacters(self, token): - if token["data"] == "\u0000": - return - self.tree.insertText(token["data"]) - - def startTagOption(self, token): - # We need to imply </option> if <option> is the current node. - if self.tree.openElements[-1].name == "option": - self.tree.openElements.pop() - self.tree.insertElement(token) - - def startTagOptgroup(self, token): - if self.tree.openElements[-1].name == "option": - self.tree.openElements.pop() - if self.tree.openElements[-1].name == "optgroup": - self.tree.openElements.pop() - self.tree.insertElement(token) - - def startTagSelect(self, token): - self.parser.parseError("unexpected-select-in-select") - self.endTagSelect(impliedTagToken("select")) - - def startTagInput(self, token): - self.parser.parseError("unexpected-input-in-select") - if self.tree.elementInScope("select", variant="select"): - self.endTagSelect(impliedTagToken("select")) - return token - else: - assert self.parser.innerHTML - - def startTagScript(self, token): - return self.parser.phases["inHead"].processStartTag(token) - - def startTagOther(self, token): - self.parser.parseError("unexpected-start-tag-in-select", - {"name": token["name"]}) - - def endTagOption(self, token): - if self.tree.openElements[-1].name == "option": - self.tree.openElements.pop() - else: - self.parser.parseError("unexpected-end-tag-in-select", - {"name": "option"}) - - def endTagOptgroup(self, token): - # </optgroup> implicitly closes <option> - if (self.tree.openElements[-1].name == "option" and - self.tree.openElements[-2].name == "optgroup"): - self.tree.openElements.pop() - # It also closes </optgroup> - if self.tree.openElements[-1].name == "optgroup": - self.tree.openElements.pop() - # But nothing else - else: - self.parser.parseError("unexpected-end-tag-in-select", - {"name": "optgroup"}) - - def endTagSelect(self, token): - if self.tree.elementInScope("select", variant="select"): - node = self.tree.openElements.pop() - while node.name != "select": - node = self.tree.openElements.pop() - self.parser.resetInsertionMode() - else: - # innerHTML case - assert self.parser.innerHTML - self.parser.parseError() - - def endTagOther(self, token): - self.parser.parseError("unexpected-end-tag-in-select", - {"name": token["name"]}) - - startTagHandler = _utils.MethodDispatcher([ - ("html", Phase.startTagHtml), - ("option", startTagOption), - ("optgroup", startTagOptgroup), - ("select", startTagSelect), - (("input", "keygen", "textarea"), startTagInput), - ("script", startTagScript) - ]) - startTagHandler.default = startTagOther - - endTagHandler = _utils.MethodDispatcher([ - ("option", endTagOption), - ("optgroup", endTagOptgroup), - ("select", endTagSelect) - ]) - endTagHandler.default = endTagOther - - class InSelectInTablePhase(Phase): - __slots__ = tuple() - - def 
processEOF(self): - self.parser.phases["inSelect"].processEOF() - - def processCharacters(self, token): - return self.parser.phases["inSelect"].processCharacters(token) - - def startTagTable(self, token): - self.parser.parseError("unexpected-table-element-start-tag-in-select-in-table", {"name": token["name"]}) - self.endTagOther(impliedTagToken("select")) - return token - - def startTagOther(self, token): - return self.parser.phases["inSelect"].processStartTag(token) - - def endTagTable(self, token): - self.parser.parseError("unexpected-table-element-end-tag-in-select-in-table", {"name": token["name"]}) - if self.tree.elementInScope(token["name"], variant="table"): - self.endTagOther(impliedTagToken("select")) - return token - - def endTagOther(self, token): - return self.parser.phases["inSelect"].processEndTag(token) - - startTagHandler = _utils.MethodDispatcher([ - (("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"), - startTagTable) - ]) - startTagHandler.default = startTagOther - - endTagHandler = _utils.MethodDispatcher([ - (("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"), - endTagTable) - ]) - endTagHandler.default = endTagOther - - class InForeignContentPhase(Phase): - __slots__ = tuple() - - breakoutElements = frozenset(["b", "big", "blockquote", "body", "br", - "center", "code", "dd", "div", "dl", "dt", - "em", "embed", "h1", "h2", "h3", - "h4", "h5", "h6", "head", "hr", "i", "img", - "li", "listing", "menu", "meta", "nobr", - "ol", "p", "pre", "ruby", "s", "small", - "span", "strong", "strike", "sub", "sup", - "table", "tt", "u", "ul", "var"]) - - def adjustSVGTagNames(self, token): - replacements = {"altglyph": "altGlyph", - "altglyphdef": "altGlyphDef", - "altglyphitem": "altGlyphItem", - "animatecolor": "animateColor", - "animatemotion": "animateMotion", - "animatetransform": "animateTransform", - "clippath": "clipPath", - "feblend": "feBlend", - "fecolormatrix": "feColorMatrix", - "fecomponenttransfer": "feComponentTransfer", - "fecomposite": "feComposite", - "feconvolvematrix": "feConvolveMatrix", - "fediffuselighting": "feDiffuseLighting", - "fedisplacementmap": "feDisplacementMap", - "fedistantlight": "feDistantLight", - "feflood": "feFlood", - "fefunca": "feFuncA", - "fefuncb": "feFuncB", - "fefuncg": "feFuncG", - "fefuncr": "feFuncR", - "fegaussianblur": "feGaussianBlur", - "feimage": "feImage", - "femerge": "feMerge", - "femergenode": "feMergeNode", - "femorphology": "feMorphology", - "feoffset": "feOffset", - "fepointlight": "fePointLight", - "fespecularlighting": "feSpecularLighting", - "fespotlight": "feSpotLight", - "fetile": "feTile", - "feturbulence": "feTurbulence", - "foreignobject": "foreignObject", - "glyphref": "glyphRef", - "lineargradient": "linearGradient", - "radialgradient": "radialGradient", - "textpath": "textPath"} - - if token["name"] in replacements: - token["name"] = replacements[token["name"]] - - def processCharacters(self, token): - if token["data"] == "\u0000": - token["data"] = "\uFFFD" - elif (self.parser.framesetOK and - any(char not in spaceCharacters for char in token["data"])): - self.parser.framesetOK = False - Phase.processCharacters(self, token) - - def processStartTag(self, token): - currentNode = self.tree.openElements[-1] - if (token["name"] in self.breakoutElements or - (token["name"] == "font" and - set(token["data"].keys()) & {"color", "face", "size"})): - self.parser.parseError("unexpected-html-element-in-foreign-content", - {"name": token["name"]}) - while (self.tree.openElements[-1].namespace 
!= - self.tree.defaultNamespace and - not self.parser.isHTMLIntegrationPoint(self.tree.openElements[-1]) and - not self.parser.isMathMLTextIntegrationPoint(self.tree.openElements[-1])): - self.tree.openElements.pop() - return token - - else: - if currentNode.namespace == namespaces["mathml"]: - self.parser.adjustMathMLAttributes(token) - elif currentNode.namespace == namespaces["svg"]: - self.adjustSVGTagNames(token) - self.parser.adjustSVGAttributes(token) - self.parser.adjustForeignAttributes(token) - token["namespace"] = currentNode.namespace - self.tree.insertElement(token) - if token["selfClosing"]: - self.tree.openElements.pop() - token["selfClosingAcknowledged"] = True - - def processEndTag(self, token): - nodeIndex = len(self.tree.openElements) - 1 - node = self.tree.openElements[-1] - if node.name.translate(asciiUpper2Lower) != token["name"]: - self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) - - while True: - if node.name.translate(asciiUpper2Lower) == token["name"]: - # XXX this isn't in the spec but it seems necessary - if self.parser.phase == self.parser.phases["inTableText"]: - self.parser.phase.flushCharacters() - self.parser.phase = self.parser.phase.originalPhase - while self.tree.openElements.pop() != node: - assert self.tree.openElements - new_token = None - break - nodeIndex -= 1 - - node = self.tree.openElements[nodeIndex] - if node.namespace != self.tree.defaultNamespace: - continue - else: - new_token = self.parser.phase.processEndTag(token) - break - return new_token - - class AfterBodyPhase(Phase): - __slots__ = tuple() - - def processEOF(self): - # Stop parsing - pass - - def processComment(self, token): - # This is needed because data is to be appended to the <html> element - # here and not to whatever is currently open. 
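As an aside, that placement rule is easy to observe from the outside; a sketch, again assuming the standalone html5lib distribution:

.. code-block:: python

    import html5lib

    # A comment seen after </body> is attached to the <html> element
    # (openElements[0]), not to the body that was just closed.
    dom = html5lib.parse("<body><p>hi</p></body><!--trailing-->")
    print(html5lib.serialize(dom, omit_optional_tags=False))
    # Expected: <html><head></head><body><p>hi</p></body><!--trailing--></html>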
- self.tree.insertComment(token, self.tree.openElements[0]) - - def processCharacters(self, token): - self.parser.parseError("unexpected-char-after-body") - self.parser.phase = self.parser.phases["inBody"] - return token - - def startTagHtml(self, token): - return self.parser.phases["inBody"].processStartTag(token) - - def startTagOther(self, token): - self.parser.parseError("unexpected-start-tag-after-body", - {"name": token["name"]}) - self.parser.phase = self.parser.phases["inBody"] - return token - - def endTagHtml(self, name): - if self.parser.innerHTML: - self.parser.parseError("unexpected-end-tag-after-body-innerhtml") - else: - self.parser.phase = self.parser.phases["afterAfterBody"] - - def endTagOther(self, token): - self.parser.parseError("unexpected-end-tag-after-body", - {"name": token["name"]}) - self.parser.phase = self.parser.phases["inBody"] - return token - - startTagHandler = _utils.MethodDispatcher([ - ("html", startTagHtml) - ]) - startTagHandler.default = startTagOther - - endTagHandler = _utils.MethodDispatcher([("html", endTagHtml)]) - endTagHandler.default = endTagOther - - class InFramesetPhase(Phase): - # http://www.whatwg.org/specs/web-apps/current-work/#in-frameset - __slots__ = tuple() - - def processEOF(self): - if self.tree.openElements[-1].name != "html": - self.parser.parseError("eof-in-frameset") - else: - assert self.parser.innerHTML - - def processCharacters(self, token): - self.parser.parseError("unexpected-char-in-frameset") - - def startTagFrameset(self, token): - self.tree.insertElement(token) - - def startTagFrame(self, token): - self.tree.insertElement(token) - self.tree.openElements.pop() - - def startTagNoframes(self, token): - return self.parser.phases["inBody"].processStartTag(token) - - def startTagOther(self, token): - self.parser.parseError("unexpected-start-tag-in-frameset", - {"name": token["name"]}) - - def endTagFrameset(self, token): - if self.tree.openElements[-1].name == "html": - # innerHTML case - self.parser.parseError("unexpected-frameset-in-frameset-innerhtml") - else: - self.tree.openElements.pop() - if (not self.parser.innerHTML and - self.tree.openElements[-1].name != "frameset"): - # If we're not in innerHTML mode and the current node is not a - # "frameset" element (anymore) then switch. 
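The recurring "innerHTML case" branches in these phases (and the asserts that guard them) are reachable only when parsing a fragment, where the open-elements stack bottoms out at a fragment container rather than a real document; a sketch, assuming the standalone html5lib distribution:

.. code-block:: python

    import html5lib

    # Fragment parsing is the "innerHTML case": the <td> is parsed as if
    # it were the innerHTML of a <tr>, so no <html>/<body> is implied.
    frag = html5lib.parseFragment("<td>x</td>", container="tr")
    print(html5lib.serialize(frag, omit_optional_tags=False))
    # Expected: <td>x</td>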
- self.parser.phase = self.parser.phases["afterFrameset"] - - def endTagOther(self, token): - self.parser.parseError("unexpected-end-tag-in-frameset", - {"name": token["name"]}) - - startTagHandler = _utils.MethodDispatcher([ - ("html", Phase.startTagHtml), - ("frameset", startTagFrameset), - ("frame", startTagFrame), - ("noframes", startTagNoframes) - ]) - startTagHandler.default = startTagOther - - endTagHandler = _utils.MethodDispatcher([ - ("frameset", endTagFrameset) - ]) - endTagHandler.default = endTagOther - - class AfterFramesetPhase(Phase): - # http://www.whatwg.org/specs/web-apps/current-work/#after3 - __slots__ = tuple() - - def processEOF(self): - # Stop parsing - pass - - def processCharacters(self, token): - self.parser.parseError("unexpected-char-after-frameset") - - def startTagNoframes(self, token): - return self.parser.phases["inHead"].processStartTag(token) - - def startTagOther(self, token): - self.parser.parseError("unexpected-start-tag-after-frameset", - {"name": token["name"]}) - - def endTagHtml(self, token): - self.parser.phase = self.parser.phases["afterAfterFrameset"] - - def endTagOther(self, token): - self.parser.parseError("unexpected-end-tag-after-frameset", - {"name": token["name"]}) - - startTagHandler = _utils.MethodDispatcher([ - ("html", Phase.startTagHtml), - ("noframes", startTagNoframes) - ]) - startTagHandler.default = startTagOther - - endTagHandler = _utils.MethodDispatcher([ - ("html", endTagHtml) - ]) - endTagHandler.default = endTagOther - - class AfterAfterBodyPhase(Phase): - __slots__ = tuple() - - def processEOF(self): - pass - - def processComment(self, token): - self.tree.insertComment(token, self.tree.document) - - def processSpaceCharacters(self, token): - return self.parser.phases["inBody"].processSpaceCharacters(token) - - def processCharacters(self, token): - self.parser.parseError("expected-eof-but-got-char") - self.parser.phase = self.parser.phases["inBody"] - return token - - def startTagHtml(self, token): - return self.parser.phases["inBody"].processStartTag(token) - - def startTagOther(self, token): - self.parser.parseError("expected-eof-but-got-start-tag", - {"name": token["name"]}) - self.parser.phase = self.parser.phases["inBody"] - return token - - def processEndTag(self, token): - self.parser.parseError("expected-eof-but-got-end-tag", - {"name": token["name"]}) - self.parser.phase = self.parser.phases["inBody"] - return token - - startTagHandler = _utils.MethodDispatcher([ - ("html", startTagHtml) - ]) - startTagHandler.default = startTagOther - - class AfterAfterFramesetPhase(Phase): - __slots__ = tuple() - - def processEOF(self): - pass - - def processComment(self, token): - self.tree.insertComment(token, self.tree.document) - - def processSpaceCharacters(self, token): - return self.parser.phases["inBody"].processSpaceCharacters(token) - - def processCharacters(self, token): - self.parser.parseError("expected-eof-but-got-char") - - def startTagHtml(self, token): - return self.parser.phases["inBody"].processStartTag(token) - - def startTagNoFrames(self, token): - return self.parser.phases["inHead"].processStartTag(token) - - def startTagOther(self, token): - self.parser.parseError("expected-eof-but-got-start-tag", - {"name": token["name"]}) - - def processEndTag(self, token): - self.parser.parseError("expected-eof-but-got-end-tag", - {"name": token["name"]}) - - startTagHandler = _utils.MethodDispatcher([ - ("html", startTagHtml), - ("noframes", startTagNoFrames) - ]) - startTagHandler.default = startTagOther - - # 
pylint:enable=unused-argument - - return { - "initial": InitialPhase, - "beforeHtml": BeforeHtmlPhase, - "beforeHead": BeforeHeadPhase, - "inHead": InHeadPhase, - "inHeadNoscript": InHeadNoscriptPhase, - "afterHead": AfterHeadPhase, - "inBody": InBodyPhase, - "text": TextPhase, - "inTable": InTablePhase, - "inTableText": InTableTextPhase, - "inCaption": InCaptionPhase, - "inColumnGroup": InColumnGroupPhase, - "inTableBody": InTableBodyPhase, - "inRow": InRowPhase, - "inCell": InCellPhase, - "inSelect": InSelectPhase, - "inSelectInTable": InSelectInTablePhase, - "inForeignContent": InForeignContentPhase, - "afterBody": AfterBodyPhase, - "inFrameset": InFramesetPhase, - "afterFrameset": AfterFramesetPhase, - "afterAfterBody": AfterAfterBodyPhase, - "afterAfterFrameset": AfterAfterFramesetPhase, - # XXX after after frameset - } - - -def adjust_attributes(token, replacements): - needs_adjustment = viewkeys(token['data']) & viewkeys(replacements) - if needs_adjustment: - token['data'] = type(token['data'])((replacements.get(k, k), v) - for k, v in token['data'].items()) - - -def impliedTagToken(name, type="EndTag", attributes=None, - selfClosing=False): - if attributes is None: - attributes = {} - return {"type": tokenTypes[type], "name": name, "data": attributes, - "selfClosing": selfClosing} - - -class ParseError(Exception): - """Error in parsed document""" - pass diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/serializer.py b/env/Lib/site-packages/pip/_vendor/html5lib/serializer.py deleted file mode 100644 index d5669d8c149923d2eb8ac5c5289ecb7d4ed3e532..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/html5lib/serializer.py +++ /dev/null @@ -1,409 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals -from pip._vendor.six import text_type - -import re - -from codecs import register_error, xmlcharrefreplace_errors - -from .constants import voidElements, booleanAttributes, spaceCharacters -from .constants import rcdataElements, entities, xmlEntities -from . import treewalkers, _utils -from xml.sax.saxutils import escape - -_quoteAttributeSpecChars = "".join(spaceCharacters) + "\"'=<>`" -_quoteAttributeSpec = re.compile("[" + _quoteAttributeSpecChars + "]") -_quoteAttributeLegacy = re.compile("[" + _quoteAttributeSpecChars + - "\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n" - "\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15" - "\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" - "\x20\x2f\x60\xa0\u1680\u180e\u180f\u2000" - "\u2001\u2002\u2003\u2004\u2005\u2006\u2007" - "\u2008\u2009\u200a\u2028\u2029\u202f\u205f" - "\u3000]") - - -_encode_entity_map = {} -_is_ucs4 = len("\U0010FFFF") == 1 -for k, v in list(entities.items()): - # skip multi-character entities - if ((_is_ucs4 and len(v) > 1) or - (not _is_ucs4 and len(v) > 2)): - continue - if v != "&": - if len(v) == 2: - v = _utils.surrogatePairToCodepoint(v) - else: - v = ord(v) - if v not in _encode_entity_map or k.islower(): - # prefer &lt; over &LT; and similarly for &amp;, &gt;, etc. 
- _encode_entity_map[v] = k - - -def htmlentityreplace_errors(exc): - if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)): - res = [] - codepoints = [] - skip = False - for i, c in enumerate(exc.object[exc.start:exc.end]): - if skip: - skip = False - continue - index = i + exc.start - if _utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]): - codepoint = _utils.surrogatePairToCodepoint(exc.object[index:index + 2]) - skip = True - else: - codepoint = ord(c) - codepoints.append(codepoint) - for cp in codepoints: - e = _encode_entity_map.get(cp) - if e: - res.append("&") - res.append(e) - if not e.endswith(";"): - res.append(";") - else: - res.append("&#x%s;" % (hex(cp)[2:])) - return ("".join(res), exc.end) - else: - return xmlcharrefreplace_errors(exc) - - -register_error("htmlentityreplace", htmlentityreplace_errors) - - -def serialize(input, tree="etree", encoding=None, **serializer_opts): - """Serializes the input token stream using the specified treewalker - - :arg input: the token stream to serialize - - :arg tree: the treewalker to use - - :arg encoding: the encoding to use - - :arg serializer_opts: any options to pass to the - :py:class:`html5lib.serializer.HTMLSerializer` that gets created - - :returns: the tree serialized as a string - - Example: - - >>> from html5lib.html5parser import parse - >>> from html5lib.serializer import serialize - >>> token_stream = parse('<html><body><p>Hi!</p></body></html>') - >>> serialize(token_stream, omit_optional_tags=False) - '<html><head></head><body><p>Hi!</p></body></html>' - - """ - # XXX: Should we cache this? - walker = treewalkers.getTreeWalker(tree) - s = HTMLSerializer(**serializer_opts) - return s.render(walker(input), encoding) - - -class HTMLSerializer(object): - - # attribute quoting options - quote_attr_values = "legacy" # be secure by default - quote_char = '"' - use_best_quote_char = True - - # tag syntax options - omit_optional_tags = True - minimize_boolean_attributes = True - use_trailing_solidus = False - space_before_trailing_solidus = True - - # escaping options - escape_lt_in_attrs = False - escape_rcdata = False - resolve_entities = True - - # miscellaneous options - alphabetical_attributes = False - inject_meta_charset = True - strip_whitespace = False - sanitize = False - - options = ("quote_attr_values", "quote_char", "use_best_quote_char", - "omit_optional_tags", "minimize_boolean_attributes", - "use_trailing_solidus", "space_before_trailing_solidus", - "escape_lt_in_attrs", "escape_rcdata", "resolve_entities", - "alphabetical_attributes", "inject_meta_charset", - "strip_whitespace", "sanitize") - - def __init__(self, **kwargs): - """Initialize HTMLSerializer - - :arg inject_meta_charset: Whether or not to inject the meta charset. - - Defaults to ``True``. - - :arg quote_attr_values: Whether to quote attribute values that don't - require quoting per legacy browser behavior (``"legacy"``), when - required by the standard (``"spec"``), or always (``"always"``). - - Defaults to ``"legacy"``. - - :arg quote_char: Use given quote character for attribute quoting. - - Defaults to ``"`` which will use double quotes unless attribute - value contains a double quote, in which case single quotes are - used. - - :arg escape_lt_in_attrs: Whether or not to escape ``<`` in attribute - values. - - Defaults to ``False``. - - :arg escape_rcdata: Whether to escape characters that need to be - escaped within normal elements within rcdata elements such as - style. - - Defaults to ``False``. 
- - :arg resolve_entities: Whether to resolve named character entities that - appear in the source tree. The XML predefined entities &lt; &gt; - &amp; &quot; &apos; are unaffected by this setting. - - Defaults to ``True``. - - :arg strip_whitespace: Whether to remove semantically meaningless - whitespace. (This compresses all whitespace to a single space - except within ``pre``.) - - Defaults to ``False``. - - :arg minimize_boolean_attributes: Shortens boolean attributes to give - just the attribute value, for example:: - - <input disabled="disabled"> - - becomes:: - - <input disabled> - - Defaults to ``True``. - - :arg use_trailing_solidus: Includes a close-tag slash at the end of the - start tag of void elements (empty elements whose end tag is - forbidden). E.g. ``<hr/>``. - - Defaults to ``False``. - - :arg space_before_trailing_solidus: Places a space immediately before - the closing slash in a tag using a trailing solidus. E.g. - ``<hr />``. Requires ``use_trailing_solidus=True``. - - Defaults to ``True``. - - :arg sanitize: Strip all unsafe or unknown constructs from output. - See :py:class:`html5lib.filters.sanitizer.Filter`. - - Defaults to ``False``. - - :arg omit_optional_tags: Omit start/end tags that are optional. - - Defaults to ``True``. - - :arg alphabetical_attributes: Reorder attributes to be in alphabetical order. - - Defaults to ``False``. - - """ - unexpected_args = frozenset(kwargs) - frozenset(self.options) - if len(unexpected_args) > 0: - raise TypeError("__init__() got an unexpected keyword argument '%s'" % next(iter(unexpected_args))) - if 'quote_char' in kwargs: - self.use_best_quote_char = False - for attr in self.options: - setattr(self, attr, kwargs.get(attr, getattr(self, attr))) - self.errors = [] - self.strict = False - - def encode(self, string): - assert(isinstance(string, text_type)) - if self.encoding: - return string.encode(self.encoding, "htmlentityreplace") - else: - return string - - def encodeStrict(self, string): - assert(isinstance(string, text_type)) - if self.encoding: - return string.encode(self.encoding, "strict") - else: - return string - - def serialize(self, treewalker, encoding=None): - # pylint:disable=too-many-nested-blocks - self.encoding = encoding - in_cdata = False - self.errors = [] - - if encoding and self.inject_meta_charset: - from .filters.inject_meta_charset import Filter - treewalker = Filter(treewalker, encoding) - # Alphabetical attributes is here under the assumption that none of - # the later filters add or change order of attributes; it needs to be - # before the sanitizer so escaped elements come out correctly - if self.alphabetical_attributes: - from .filters.alphabeticalattributes import Filter - treewalker = Filter(treewalker) - # WhitespaceFilter should be used before OptionalTagFilter - # for maximum efficiently of this latter filter - if self.strip_whitespace: - from .filters.whitespace import Filter - treewalker = Filter(treewalker) - if self.sanitize: - from .filters.sanitizer import Filter - treewalker = Filter(treewalker) - if self.omit_optional_tags: - from .filters.optionaltags import Filter - treewalker = Filter(treewalker) - - for token in treewalker: - type = token["type"] - if type == "Doctype": - doctype = "<!DOCTYPE %s" % token["name"] - - if token["publicId"]: - doctype += ' PUBLIC "%s"' % token["publicId"] - elif token["systemId"]: - doctype += " SYSTEM" - if token["systemId"]: - if token["systemId"].find('"') >= 0: - if token["systemId"].find("'") >= 0: - self.serializeError("System identifier contains both single and double quote characters") - quote_char = "'" - else: - quote_char = '"' - doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char) - - doctype += ">" - yield self.encodeStrict(doctype) - - elif type in ("Characters", "SpaceCharacters"): - if type == "SpaceCharacters" or in_cdata: - if in_cdata and token["data"].find("</") >= 0: - self.serializeError("Unexpected </ in CDATA") - yield self.encode(token["data"]) - else: - yield self.encode(escape(token["data"])) - - elif type in ("StartTag", "EmptyTag"): - name = token["name"] - yield self.encodeStrict("<%s" % name) - if name in rcdataElements and not self.escape_rcdata: - in_cdata = True - elif in_cdata: - self.serializeError("Unexpected child element of a CDATA element") - for (_, attr_name), attr_value in token["data"].items(): - # TODO: Add namespace support here - k = attr_name - v = attr_value - yield self.encodeStrict(' ') - - yield self.encodeStrict(k) - if not self.minimize_boolean_attributes or \ - (k not in booleanAttributes.get(name, tuple()) and - k not in booleanAttributes.get("", tuple())): - yield self.encodeStrict("=") - if self.quote_attr_values == "always" or len(v) == 0: - quote_attr = True - elif self.quote_attr_values == "spec": - quote_attr = _quoteAttributeSpec.search(v) is not None - elif self.quote_attr_values == "legacy": - quote_attr = _quoteAttributeLegacy.search(v) is not None - else: - raise ValueError("quote_attr_values must be one of: " - "'always', 'spec', or 'legacy'") - v = v.replace("&", "&amp;") - if self.escape_lt_in_attrs: - v = v.replace("<", "&lt;") - if quote_attr: - quote_char = self.quote_char - if self.use_best_quote_char: - if "'" in v and '"' not in v: - quote_char = '"' - elif '"' in v and "'" not in v: - quote_char = "'" - if quote_char == "'": - v = v.replace("'", "&#39;") - else: - v = v.replace('"', "&quot;") - yield self.encodeStrict(quote_char) - yield self.encode(v) - yield self.encodeStrict(quote_char) - else: - yield self.encode(v) - if name in voidElements and self.use_trailing_solidus: - if self.space_before_trailing_solidus: - yield self.encodeStrict(" /") - else: - yield self.encodeStrict("/") - yield self.encode(">") - - elif type == "EndTag": - name = token["name"] - if name in rcdataElements: - in_cdata = False - elif in_cdata: - self.serializeError("Unexpected child element of a CDATA element") - yield self.encodeStrict("</%s>" % name) - - elif type == "Comment": - data = token["data"] - if data.find("--") >= 0: - self.serializeError("Comment contains --") - yield self.encodeStrict("<!--%s-->" % token["data"]) - - elif type == "Entity": - name = token["name"] - key = name + ";" - if key not in entities: - self.serializeError("Entity %s not recognized" % name) - if self.resolve_entities and key not in xmlEntities: - data = entities[key] - else: - data = "&%s;" % name - yield self.encodeStrict(data) - - else: - self.serializeError(token["data"]) - - def render(self, treewalker, encoding=None): - """Serializes the stream from the treewalker into a string - - :arg treewalker: the treewalker to serialize - - :arg encoding: the string encoding to use - - :returns: the serialized tree - - Example: - - >>> from html5lib import parse, getTreeWalker - >>> from html5lib.serializer import HTMLSerializer - >>> token_stream = parse('<html><body>Hi!</body></html>') - >>> walker = getTreeWalker('etree') - >>> serializer = HTMLSerializer(omit_optional_tags=False) - >>> serializer.render(walker(token_stream)) - '<html><head></head><body>Hi!</body></html>' - - """ - if encoding: - return 
b"".join(list(self.serialize(treewalker, encoding))) - else: - return "".join(list(self.serialize(treewalker))) - - def serializeError(self, data="XXX ERROR MESSAGE NEEDED"): - # XXX The idea is to make data mandatory. - self.errors.append(data) - if self.strict: - raise SerializeError - - -class SerializeError(Exception): - """Error in serialized tree""" - pass diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/treeadapters/__init__.py b/env/Lib/site-packages/pip/_vendor/html5lib/treeadapters/__init__.py deleted file mode 100644 index 7ef59590c76ee75733d78b061d4108d49f209ee5..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/html5lib/treeadapters/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -"""Tree adapters let you convert from one tree structure to another - -Example: - -.. code-block:: python - - from pip._vendor import html5lib - from pip._vendor.html5lib.treeadapters import genshi - - doc = '<html><body>Hi!</body></html>' - treebuilder = html5lib.getTreeBuilder('etree') - parser = html5lib.HTMLParser(tree=treebuilder) - tree = parser.parse(doc) - TreeWalker = html5lib.getTreeWalker('etree') - - genshi_tree = genshi.to_genshi(TreeWalker(tree)) - -""" -from __future__ import absolute_import, division, unicode_literals - -from . import sax - -__all__ = ["sax"] - -try: - from . import genshi # noqa -except ImportError: - pass -else: - __all__.append("genshi") diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/treeadapters/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/html5lib/treeadapters/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 99d0654679c79fd13cc76e246369315b203af4f9..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/html5lib/treeadapters/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/treeadapters/__pycache__/genshi.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/html5lib/treeadapters/__pycache__/genshi.cpython-39.pyc deleted file mode 100644 index 9dfe76138922c1c7cbb7b935f57cfb39d948da1d..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/html5lib/treeadapters/__pycache__/genshi.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/treeadapters/__pycache__/sax.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/html5lib/treeadapters/__pycache__/sax.cpython-39.pyc deleted file mode 100644 index 1c945ee3bd3c419a2ffdbf318d8757bae274d674..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/html5lib/treeadapters/__pycache__/sax.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/treeadapters/genshi.py b/env/Lib/site-packages/pip/_vendor/html5lib/treeadapters/genshi.py deleted file mode 100644 index 61d5fb6ac42ca4152f056d996af0cb0b0d2ddc35..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/html5lib/treeadapters/genshi.py +++ /dev/null @@ -1,54 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -from genshi.core import QName, Attrs -from genshi.core import START, END, TEXT, COMMENT, DOCTYPE - - -def to_genshi(walker): - """Convert a tree to a genshi tree - - :arg walker: the treewalker to use to walk the tree to convert it - - :returns: generator of genshi nodes - - """ - text = [] - for token in walker: - type = token["type"] - if type in ("Characters", "SpaceCharacters"): - 
text.append(token["data"]) - elif text: - yield TEXT, "".join(text), (None, -1, -1) - text = [] - - if type in ("StartTag", "EmptyTag"): - if token["namespace"]: - name = "{%s}%s" % (token["namespace"], token["name"]) - else: - name = token["name"] - attrs = Attrs([(QName("{%s}%s" % attr if attr[0] is not None else attr[1]), value) - for attr, value in token["data"].items()]) - yield (START, (QName(name), attrs), (None, -1, -1)) - if type == "EmptyTag": - type = "EndTag" - - if type == "EndTag": - if token["namespace"]: - name = "{%s}%s" % (token["namespace"], token["name"]) - else: - name = token["name"] - - yield END, QName(name), (None, -1, -1) - - elif type == "Comment": - yield COMMENT, token["data"], (None, -1, -1) - - elif type == "Doctype": - yield DOCTYPE, (token["name"], token["publicId"], - token["systemId"]), (None, -1, -1) - - else: - pass # FIXME: What to do? - - if text: - yield TEXT, "".join(text), (None, -1, -1) diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/treeadapters/sax.py b/env/Lib/site-packages/pip/_vendor/html5lib/treeadapters/sax.py deleted file mode 100644 index f4ccea5a25653dd9e4c1bcf1047f407184562a1b..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/html5lib/treeadapters/sax.py +++ /dev/null @@ -1,50 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -from xml.sax.xmlreader import AttributesNSImpl - -from ..constants import adjustForeignAttributes, unadjustForeignAttributes - -prefix_mapping = {} -for prefix, localName, namespace in adjustForeignAttributes.values(): - if prefix is not None: - prefix_mapping[prefix] = namespace - - -def to_sax(walker, handler): - """Call SAX-like content handler based on treewalker walker - - :arg walker: the treewalker to use to walk the tree to convert it - - :arg handler: SAX handler to use - - """ - handler.startDocument() - for prefix, namespace in prefix_mapping.items(): - handler.startPrefixMapping(prefix, namespace) - - for token in walker: - type = token["type"] - if type == "Doctype": - continue - elif type in ("StartTag", "EmptyTag"): - attrs = AttributesNSImpl(token["data"], - unadjustForeignAttributes) - handler.startElementNS((token["namespace"], token["name"]), - token["name"], - attrs) - if type == "EmptyTag": - handler.endElementNS((token["namespace"], token["name"]), - token["name"]) - elif type == "EndTag": - handler.endElementNS((token["namespace"], token["name"]), - token["name"]) - elif type in ("Characters", "SpaceCharacters"): - handler.characters(token["data"]) - elif type == "Comment": - pass - else: - assert False, "Unknown token type" - - for prefix, namespace in prefix_mapping.items(): - handler.endPrefixMapping(prefix) - handler.endDocument() diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/__init__.py b/env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/__init__.py deleted file mode 100644 index d44447eaf5a3912ea699e6d895d51f9b0782cfba..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/__init__.py +++ /dev/null @@ -1,88 +0,0 @@ -"""A collection of modules for building different kinds of trees from HTML -documents. - -To create a treebuilder for a new type of tree, you need to do -implement several things: - -1. A set of classes for various types of elements: Document, Doctype, Comment, - Element. 
These must implement the interface of ``base.treebuilders.Node`` - (although comment nodes have a different signature for their constructor, - see ``treebuilders.etree.Comment``) Textual content may also be implemented - as another node type, or not, as your tree implementation requires. - -2. A treebuilder object (called ``TreeBuilder`` by convention) that inherits - from ``treebuilders.base.TreeBuilder``. This has 4 required attributes: - - * ``documentClass`` - the class to use for the bottommost node of a document - * ``elementClass`` - the class to use for HTML Elements - * ``commentClass`` - the class to use for comments - * ``doctypeClass`` - the class to use for doctypes - - It also has one required method: - - * ``getDocument`` - Returns the root node of the complete document tree - -3. If you wish to run the unit tests, you must also create a ``testSerializer`` - method on your treebuilder which accepts a node and returns a string - containing Node and its children serialized according to the format used in - the unittests - -""" - -from __future__ import absolute_import, division, unicode_literals - -from .._utils import default_etree - -treeBuilderCache = {} - - -def getTreeBuilder(treeType, implementation=None, **kwargs): - """Get a TreeBuilder class for various types of trees with built-in support - - :arg treeType: the name of the tree type required (case-insensitive). Supported - values are: - - * "dom" - A generic builder for DOM implementations, defaulting to a - xml.dom.minidom based implementation. - * "etree" - A generic builder for tree implementations exposing an - ElementTree-like interface, defaulting to xml.etree.cElementTree if - available and xml.etree.ElementTree if not. - * "lxml" - A etree-based builder for lxml.etree, handling limitations - of lxml's implementation. - - :arg implementation: (Currently applies to the "etree" and "dom" tree - types). A module implementing the tree type e.g. xml.etree.ElementTree - or xml.etree.cElementTree. - - :arg kwargs: Any additional options to pass to the TreeBuilder when - creating it. - - Example: - - >>> from html5lib.treebuilders import getTreeBuilder - >>> builder = getTreeBuilder('etree') - - """ - - treeType = treeType.lower() - if treeType not in treeBuilderCache: - if treeType == "dom": - from . import dom - # Come up with a sane default (pref. from the stdlib) - if implementation is None: - from xml.dom import minidom - implementation = minidom - # NEVER cache here, caching is done in the dom submodule - return dom.getDomModule(implementation, **kwargs).TreeBuilder - elif treeType == "lxml": - from . import etree_lxml - treeBuilderCache[treeType] = etree_lxml.TreeBuilder - elif treeType == "etree": - from . 
import etree - if implementation is None: - implementation = default_etree - # NEVER cache here, caching is done in the etree submodule - return etree.getETreeModule(implementation, **kwargs).TreeBuilder - else: - raise ValueError("""Unrecognised treebuilder "%s" """ % treeType) - return treeBuilderCache.get(treeType) diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 3804fd8036fd70557bf6a8df2be70d28b2b44c3d..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/base.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/base.cpython-39.pyc deleted file mode 100644 index e4928bd3a95a7fa0c5153de3137e3d337a6b9728..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/base.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/dom.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/dom.cpython-39.pyc deleted file mode 100644 index 47664e6b588d9feebf437bcfe5c864839a3e6a21..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/dom.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/etree.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/etree.cpython-39.pyc deleted file mode 100644 index fcc114b380aee1df39d9a2b8e2be72ad0b24bc7b..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/etree.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/etree_lxml.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/etree_lxml.cpython-39.pyc deleted file mode 100644 index 328dbb6ca730053534aeb3d2e7b477426d2afae8..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/etree_lxml.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/base.py b/env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/base.py deleted file mode 100644 index 965fce29d3b9e01e9e9374a3d6318badeca7e1e1..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/base.py +++ /dev/null @@ -1,417 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals -from pip._vendor.six import text_type - -from ..constants import scopingElements, tableInsertModeElements, namespaces - -# The scope markers are inserted when entering object elements, -# marquees, table cells, and table captions, and are used to prevent formatting -# from "leaking" into tables, object elements, and marquees. 
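The observable effect of those markers, sketched with the standalone html5lib distribution (an illustration, not part of the deleted file):

.. code-block:: python

    import html5lib

    # <b> is still open when the table starts, but the marker pushed at
    # the cell boundary keeps it from being reconstructed inside <td>.
    dom = html5lib.parse("<b>bold<table><td>cell</td></table>")
    print(html5lib.serialize(dom, omit_optional_tags=False))
    # Expected (note "cell" is not wrapped in <b>):
    # <html><head></head><body><b>bold<table><tbody><tr><td>cell</td></tr></tbody></table></b></body></html>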
-Marker = None - -listElementsMap = { - None: (frozenset(scopingElements), False), - "button": (frozenset(scopingElements | {(namespaces["html"], "button")}), False), - "list": (frozenset(scopingElements | {(namespaces["html"], "ol"), - (namespaces["html"], "ul")}), False), - "table": (frozenset([(namespaces["html"], "html"), - (namespaces["html"], "table")]), False), - "select": (frozenset([(namespaces["html"], "optgroup"), - (namespaces["html"], "option")]), True) -} - - -class Node(object): - """Represents an item in the tree""" - def __init__(self, name): - """Creates a Node - - :arg name: The tag name associated with the node - - """ - # The tag name associated with the node - self.name = name - # The parent of the current node (or None for the document node) - self.parent = None - # The value of the current node (applies to text nodes and comments) - self.value = None - # A dict holding name -> value pairs for attributes of the node - self.attributes = {} - # A list of child nodes of the current node. This must include all - # elements but not necessarily other node types. - self.childNodes = [] - # A list of miscellaneous flags that can be set on the node. - self._flags = [] - - def __str__(self): - attributesStr = " ".join(["%s=\"%s\"" % (name, value) - for name, value in - self.attributes.items()]) - if attributesStr: - return "<%s %s>" % (self.name, attributesStr) - else: - return "<%s>" % (self.name) - - def __repr__(self): - return "<%s>" % (self.name) - - def appendChild(self, node): - """Insert node as a child of the current node - - :arg node: the node to insert - - """ - raise NotImplementedError - - def insertText(self, data, insertBefore=None): - """Insert data as text in the current node, positioned before the - start of node insertBefore or to the end of the node's text. - - :arg data: the data to insert - - :arg insertBefore: True if you want to insert the text before the node - and False if you want to insert it after the node - - """ - raise NotImplementedError - - def insertBefore(self, node, refNode): - """Insert node as a child of the current node, before refNode in the - list of child nodes. Raises ValueError if refNode is not a child of - the current node - - :arg node: the node to insert - - :arg refNode: the child node to insert the node before - - """ - raise NotImplementedError - - def removeChild(self, node): - """Remove node from the children of the current node - - :arg node: the child node to remove - - """ - raise NotImplementedError - - def reparentChildren(self, newParent): - """Move all the children of the current node to newParent. - This is needed so that trees that don't store text as nodes move the - text in the correct way - - :arg newParent: the node to move all this node's children to - - """ - # XXX - should this method be made more general? - for child in self.childNodes: - newParent.appendChild(child) - self.childNodes = [] - - def cloneNode(self): - """Return a shallow copy of the current node i.e. 
a node with the same - name and attributes but with no parent or child nodes - """ - raise NotImplementedError - - def hasContent(self): - """Return true if the node has children or text, false otherwise - """ - raise NotImplementedError - - -class ActiveFormattingElements(list): - def append(self, node): - equalCount = 0 - if node != Marker: - for element in self[::-1]: - if element == Marker: - break - if self.nodesEqual(element, node): - equalCount += 1 - if equalCount == 3: - self.remove(element) - break - list.append(self, node) - - def nodesEqual(self, node1, node2): - if not node1.nameTuple == node2.nameTuple: - return False - - if not node1.attributes == node2.attributes: - return False - - return True - - -class TreeBuilder(object): - """Base treebuilder implementation - - * documentClass - the class to use for the bottommost node of a document - * elementClass - the class to use for HTML Elements - * commentClass - the class to use for comments - * doctypeClass - the class to use for doctypes - - """ - # pylint:disable=not-callable - - # Document class - documentClass = None - - # The class to use for creating a node - elementClass = None - - # The class to use for creating comments - commentClass = None - - # The class to use for creating doctypes - doctypeClass = None - - # Fragment class - fragmentClass = None - - def __init__(self, namespaceHTMLElements): - """Create a TreeBuilder - - :arg namespaceHTMLElements: whether or not to namespace HTML elements - - """ - if namespaceHTMLElements: - self.defaultNamespace = "http://www.w3.org/1999/xhtml" - else: - self.defaultNamespace = None - self.reset() - - def reset(self): - self.openElements = [] - self.activeFormattingElements = ActiveFormattingElements() - - # XXX - rename these to headElement, formElement - self.headPointer = None - self.formPointer = None - - self.insertFromTable = False - - self.document = self.documentClass() - - def elementInScope(self, target, variant=None): - - # If we pass a node in we match that. if we pass a string - # match any node with that name - exactNode = hasattr(target, "nameTuple") - if not exactNode: - if isinstance(target, text_type): - target = (namespaces["html"], target) - assert isinstance(target, tuple) - - listElements, invert = listElementsMap[variant] - - for node in reversed(self.openElements): - if exactNode and node == target: - return True - elif not exactNode and node.nameTuple == target: - return True - elif (invert ^ (node.nameTuple in listElements)): - return False - - assert False # We should never reach this point - - def reconstructActiveFormattingElements(self): - # Within this algorithm the order of steps described in the - # specification is not quite the same as the order of steps in the - # code. It should still do the same though. - - # Step 1: stop the algorithm when there's nothing to do. - if not self.activeFormattingElements: - return - - # Step 2 and step 3: we start with the last element. So i is -1. - i = len(self.activeFormattingElements) - 1 - entry = self.activeFormattingElements[i] - if entry == Marker or entry in self.openElements: - return - - # Step 6 - while entry != Marker and entry not in self.openElements: - if i == 0: - # This will be reset to 0 below - i = -1 - break - i -= 1 - # Step 5: let entry be one earlier in the list. 
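What reconstructActiveFormattingElements ultimately produces, sketched with the standalone html5lib distribution:

.. code-block:: python

    import html5lib

    # The <b> left open in the first paragraph stays on the active
    # formatting list, so a clone of it is inserted into the second one.
    dom = html5lib.parse("<p><b>one<p>two")
    print(html5lib.serialize(dom, omit_optional_tags=False))
    # Expected:
    # <html><head></head><body><p><b>one</b></p><p><b>two</b></p></body></html>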
- entry = self.activeFormattingElements[i] - - while True: - # Step 7 - i += 1 - - # Step 8 - entry = self.activeFormattingElements[i] - clone = entry.cloneNode() # Mainly to get a new copy of the attributes - - # Step 9 - element = self.insertElement({"type": "StartTag", - "name": clone.name, - "namespace": clone.namespace, - "data": clone.attributes}) - - # Step 10 - self.activeFormattingElements[i] = element - - # Step 11 - if element == self.activeFormattingElements[-1]: - break - - def clearActiveFormattingElements(self): - entry = self.activeFormattingElements.pop() - while self.activeFormattingElements and entry != Marker: - entry = self.activeFormattingElements.pop() - - def elementInActiveFormattingElements(self, name): - """Check if an element exists between the end of the active - formatting elements and the last marker. If it does, return it, else - return false""" - - for item in self.activeFormattingElements[::-1]: - # Check for Marker first because if it's a Marker it doesn't have a - # name attribute. - if item == Marker: - break - elif item.name == name: - return item - return False - - def insertRoot(self, token): - element = self.createElement(token) - self.openElements.append(element) - self.document.appendChild(element) - - def insertDoctype(self, token): - name = token["name"] - publicId = token["publicId"] - systemId = token["systemId"] - - doctype = self.doctypeClass(name, publicId, systemId) - self.document.appendChild(doctype) - - def insertComment(self, token, parent=None): - if parent is None: - parent = self.openElements[-1] - parent.appendChild(self.commentClass(token["data"])) - - def createElement(self, token): - """Create an element but don't insert it anywhere""" - name = token["name"] - namespace = token.get("namespace", self.defaultNamespace) - element = self.elementClass(name, namespace) - element.attributes = token["data"] - return element - - def _getInsertFromTable(self): - return self._insertFromTable - - def _setInsertFromTable(self, value): - """Switch the function used to insert an element from the - normal one to the misnested table one and back again""" - self._insertFromTable = value - if value: - self.insertElement = self.insertElementTable - else: - self.insertElement = self.insertElementNormal - - insertFromTable = property(_getInsertFromTable, _setInsertFromTable) - - def insertElementNormal(self, token): - name = token["name"] - assert isinstance(name, text_type), "Element %s not unicode" % name - namespace = token.get("namespace", self.defaultNamespace) - element = self.elementClass(name, namespace) - element.attributes = token["data"] - self.openElements[-1].appendChild(element) - self.openElements.append(element) - return element - - def insertElementTable(self, token): - """Create an element and insert it into the tree""" - element = self.createElement(token) - if self.openElements[-1].name not in tableInsertModeElements: - return self.insertElementNormal(token) - else: - # We should be in the InTable mode. 
This means we want to do - # special magic element rearranging - parent, insertBefore = self.getTableMisnestedNodePosition() - if insertBefore is None: - parent.appendChild(element) - else: - parent.insertBefore(element, insertBefore) - self.openElements.append(element) - return element - - def insertText(self, data, parent=None): - """Insert text data.""" - if parent is None: - parent = self.openElements[-1] - - if (not self.insertFromTable or (self.insertFromTable and - self.openElements[-1].name - not in tableInsertModeElements)): - parent.insertText(data) - else: - # We should be in the InTable mode. This means we want to do - # special magic element rearranging - parent, insertBefore = self.getTableMisnestedNodePosition() - parent.insertText(data, insertBefore) - - def getTableMisnestedNodePosition(self): - """Get the foster parent element, and sibling to insert before - (or None) when inserting a misnested table node""" - # The foster parent element is the one which comes before the most - # recently opened table element - # XXX - this is really inelegant - lastTable = None - fosterParent = None - insertBefore = None - for elm in self.openElements[::-1]: - if elm.name == "table": - lastTable = elm - break - if lastTable: - # XXX - we should really check that this parent is actually a - # node here - if lastTable.parent: - fosterParent = lastTable.parent - insertBefore = lastTable - else: - fosterParent = self.openElements[ - self.openElements.index(lastTable) - 1] - else: - fosterParent = self.openElements[0] - return fosterParent, insertBefore - - def generateImpliedEndTags(self, exclude=None): - name = self.openElements[-1].name - # XXX td, th and tr are not actually needed - if (name in frozenset(("dd", "dt", "li", "option", "optgroup", "p", "rp", "rt")) and - name != exclude): - self.openElements.pop() - # XXX This is not entirely what the specification says. We should - # investigate it more closely. - self.generateImpliedEndTags(exclude) - - def getDocument(self): - """Return the final tree""" - return self.document - - def getFragment(self): - """Return the final fragment""" - # assert self.innerHTML - fragment = self.fragmentClass() - self.openElements[0].reparentChildren(fragment) - return fragment - - def testSerializer(self, node): - """Serialize the subtree of node in the format required by unit tests - - :arg node: the node from which to start serializing - - """ - raise NotImplementedError diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/dom.py b/env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/dom.py deleted file mode 100644 index d8b5300465bd475d9d2d1fd87e52ff867de3a445..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/dom.py +++ /dev/null @@ -1,239 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - - -try: - from collections.abc import MutableMapping -except ImportError: # Python 2.7 - from collections import MutableMapping -from xml.dom import minidom, Node -import weakref - -from . import base -from .. 
import constants -from ..constants import namespaces -from .._utils import moduleFactoryFactory - - -def getDomBuilder(DomImplementation): - Dom = DomImplementation - - class AttrList(MutableMapping): - def __init__(self, element): - self.element = element - - def __iter__(self): - return iter(self.element.attributes.keys()) - - def __setitem__(self, name, value): - if isinstance(name, tuple): - raise NotImplementedError - else: - attr = self.element.ownerDocument.createAttribute(name) - attr.value = value - self.element.attributes[name] = attr - - def __len__(self): - return len(self.element.attributes) - - def items(self): - return list(self.element.attributes.items()) - - def values(self): - return list(self.element.attributes.values()) - - def __getitem__(self, name): - if isinstance(name, tuple): - raise NotImplementedError - else: - return self.element.attributes[name].value - - def __delitem__(self, name): - if isinstance(name, tuple): - raise NotImplementedError - else: - del self.element.attributes[name] - - class NodeBuilder(base.Node): - def __init__(self, element): - base.Node.__init__(self, element.nodeName) - self.element = element - - namespace = property(lambda self: hasattr(self.element, "namespaceURI") and - self.element.namespaceURI or None) - - def appendChild(self, node): - node.parent = self - self.element.appendChild(node.element) - - def insertText(self, data, insertBefore=None): - text = self.element.ownerDocument.createTextNode(data) - if insertBefore: - self.element.insertBefore(text, insertBefore.element) - else: - self.element.appendChild(text) - - def insertBefore(self, node, refNode): - self.element.insertBefore(node.element, refNode.element) - node.parent = self - - def removeChild(self, node): - if node.element.parentNode == self.element: - self.element.removeChild(node.element) - node.parent = None - - def reparentChildren(self, newParent): - while self.element.hasChildNodes(): - child = self.element.firstChild - self.element.removeChild(child) - newParent.element.appendChild(child) - self.childNodes = [] - - def getAttributes(self): - return AttrList(self.element) - - def setAttributes(self, attributes): - if attributes: - for name, value in list(attributes.items()): - if isinstance(name, tuple): - if name[0] is not None: - qualifiedName = (name[0] + ":" + name[1]) - else: - qualifiedName = name[1] - self.element.setAttributeNS(name[2], qualifiedName, - value) - else: - self.element.setAttribute( - name, value) - attributes = property(getAttributes, setAttributes) - - def cloneNode(self): - return NodeBuilder(self.element.cloneNode(False)) - - def hasContent(self): - return self.element.hasChildNodes() - - def getNameTuple(self): - if self.namespace is None: - return namespaces["html"], self.name - else: - return self.namespace, self.name - - nameTuple = property(getNameTuple) - - class TreeBuilder(base.TreeBuilder): # pylint:disable=unused-variable - def documentClass(self): - self.dom = Dom.getDOMImplementation().createDocument(None, None, None) - return weakref.proxy(self) - - def insertDoctype(self, token): - name = token["name"] - publicId = token["publicId"] - systemId = token["systemId"] - - domimpl = Dom.getDOMImplementation() - doctype = domimpl.createDocumentType(name, publicId, systemId) - self.document.appendChild(NodeBuilder(doctype)) - if Dom == minidom: - doctype.ownerDocument = self.dom - - def elementClass(self, name, namespace=None): - if namespace is None and self.defaultNamespace is None: - node = self.dom.createElement(name) - else: - 
node = self.dom.createElementNS(namespace, name) - - return NodeBuilder(node) - - def commentClass(self, data): - return NodeBuilder(self.dom.createComment(data)) - - def fragmentClass(self): - return NodeBuilder(self.dom.createDocumentFragment()) - - def appendChild(self, node): - self.dom.appendChild(node.element) - - def testSerializer(self, element): - return testSerializer(element) - - def getDocument(self): - return self.dom - - def getFragment(self): - return base.TreeBuilder.getFragment(self).element - - def insertText(self, data, parent=None): - data = data - if parent != self: - base.TreeBuilder.insertText(self, data, parent) - else: - # HACK: allow text nodes as children of the document node - if hasattr(self.dom, '_child_node_types'): - # pylint:disable=protected-access - if Node.TEXT_NODE not in self.dom._child_node_types: - self.dom._child_node_types = list(self.dom._child_node_types) - self.dom._child_node_types.append(Node.TEXT_NODE) - self.dom.appendChild(self.dom.createTextNode(data)) - - implementation = DomImplementation - name = None - - def testSerializer(element): - element.normalize() - rv = [] - - def serializeElement(element, indent=0): - if element.nodeType == Node.DOCUMENT_TYPE_NODE: - if element.name: - if element.publicId or element.systemId: - publicId = element.publicId or "" - systemId = element.systemId or "" - rv.append("""|%s<!DOCTYPE %s "%s" "%s">""" % - (' ' * indent, element.name, publicId, systemId)) - else: - rv.append("|%s<!DOCTYPE %s>" % (' ' * indent, element.name)) - else: - rv.append("|%s<!DOCTYPE >" % (' ' * indent,)) - elif element.nodeType == Node.DOCUMENT_NODE: - rv.append("#document") - elif element.nodeType == Node.DOCUMENT_FRAGMENT_NODE: - rv.append("#document-fragment") - elif element.nodeType == Node.COMMENT_NODE: - rv.append("|%s<!-- %s -->" % (' ' * indent, element.nodeValue)) - elif element.nodeType == Node.TEXT_NODE: - rv.append("|%s\"%s\"" % (' ' * indent, element.nodeValue)) - else: - if (hasattr(element, "namespaceURI") and - element.namespaceURI is not None): - name = "%s %s" % (constants.prefixes[element.namespaceURI], - element.nodeName) - else: - name = element.nodeName - rv.append("|%s<%s>" % (' ' * indent, name)) - if element.hasAttributes(): - attributes = [] - for i in range(len(element.attributes)): - attr = element.attributes.item(i) - name = attr.nodeName - value = attr.value - ns = attr.namespaceURI - if ns: - name = "%s %s" % (constants.prefixes[ns], attr.localName) - else: - name = attr.nodeName - attributes.append((name, value)) - - for name, value in sorted(attributes): - rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value)) - indent += 2 - for child in element.childNodes: - serializeElement(child, indent) - serializeElement(element, 0) - - return "\n".join(rv) - - return locals() - - -# The actual means to get a module! -getDomModule = moduleFactoryFactory(getDomBuilder) diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/etree.py b/env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/etree.py deleted file mode 100644 index ea92dc301fe3fcf2ec9839c39c7844ae9f5df614..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/etree.py +++ /dev/null @@ -1,343 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals -# pylint:disable=protected-access - -from pip._vendor.six import text_type - -import re - -from copy import copy - -from . import base -from .. import _ihatexml -from .. 
import constants -from ..constants import namespaces -from .._utils import moduleFactoryFactory - -tag_regexp = re.compile("{([^}]*)}(.*)") - - -def getETreeBuilder(ElementTreeImplementation, fullTree=False): - ElementTree = ElementTreeImplementation - ElementTreeCommentType = ElementTree.Comment("asd").tag - - class Element(base.Node): - def __init__(self, name, namespace=None): - self._name = name - self._namespace = namespace - self._element = ElementTree.Element(self._getETreeTag(name, - namespace)) - if namespace is None: - self.nameTuple = namespaces["html"], self._name - else: - self.nameTuple = self._namespace, self._name - self.parent = None - self._childNodes = [] - self._flags = [] - - def _getETreeTag(self, name, namespace): - if namespace is None: - etree_tag = name - else: - etree_tag = "{%s}%s" % (namespace, name) - return etree_tag - - def _setName(self, name): - self._name = name - self._element.tag = self._getETreeTag(self._name, self._namespace) - - def _getName(self): - return self._name - - name = property(_getName, _setName) - - def _setNamespace(self, namespace): - self._namespace = namespace - self._element.tag = self._getETreeTag(self._name, self._namespace) - - def _getNamespace(self): - return self._namespace - - namespace = property(_getNamespace, _setNamespace) - - def _getAttributes(self): - return self._element.attrib - - def _setAttributes(self, attributes): - el_attrib = self._element.attrib - el_attrib.clear() - if attributes: - # calling .items _always_ allocates, and the above truthy check is cheaper than the - # allocation on average - for key, value in attributes.items(): - if isinstance(key, tuple): - name = "{%s}%s" % (key[2], key[1]) - else: - name = key - el_attrib[name] = value - - attributes = property(_getAttributes, _setAttributes) - - def _getChildNodes(self): - return self._childNodes - - def _setChildNodes(self, value): - del self._element[:] - self._childNodes = [] - for element in value: - self.insertChild(element) - - childNodes = property(_getChildNodes, _setChildNodes) - - def hasContent(self): - """Return true if the node has children or text""" - return bool(self._element.text or len(self._element)) - - def appendChild(self, node): - self._childNodes.append(node) - self._element.append(node._element) - node.parent = self - - def insertBefore(self, node, refNode): - index = list(self._element).index(refNode._element) - self._element.insert(index, node._element) - node.parent = self - - def removeChild(self, node): - self._childNodes.remove(node) - self._element.remove(node._element) - node.parent = None - - def insertText(self, data, insertBefore=None): - if not(len(self._element)): - if not self._element.text: - self._element.text = "" - self._element.text += data - elif insertBefore is None: - # Insert the text as the tail of the last child element - if not self._element[-1].tail: - self._element[-1].tail = "" - self._element[-1].tail += data - else: - # Insert the text before the specified node - children = list(self._element) - index = children.index(insertBefore._element) - if index > 0: - if not self._element[index - 1].tail: - self._element[index - 1].tail = "" - self._element[index - 1].tail += data - else: - if not self._element.text: - self._element.text = "" - self._element.text += data - - def cloneNode(self): - element = type(self)(self.name, self.namespace) - if self._element.attrib: - element._element.attrib = copy(self._element.attrib) - return element - - def reparentChildren(self, newParent): - if 
newParent.childNodes: - newParent.childNodes[-1]._element.tail += self._element.text - else: - if not newParent._element.text: - newParent._element.text = "" - if self._element.text is not None: - newParent._element.text += self._element.text - self._element.text = "" - base.Node.reparentChildren(self, newParent) - - class Comment(Element): - def __init__(self, data): - # Use the superclass constructor to set all properties on the - # wrapper element - self._element = ElementTree.Comment(data) - self.parent = None - self._childNodes = [] - self._flags = [] - - def _getData(self): - return self._element.text - - def _setData(self, value): - self._element.text = value - - data = property(_getData, _setData) - - class DocumentType(Element): - def __init__(self, name, publicId, systemId): - Element.__init__(self, "<!DOCTYPE>") - self._element.text = name - self.publicId = publicId - self.systemId = systemId - - def _getPublicId(self): - return self._element.get("publicId", "") - - def _setPublicId(self, value): - if value is not None: - self._element.set("publicId", value) - - publicId = property(_getPublicId, _setPublicId) - - def _getSystemId(self): - return self._element.get("systemId", "") - - def _setSystemId(self, value): - if value is not None: - self._element.set("systemId", value) - - systemId = property(_getSystemId, _setSystemId) - - class Document(Element): - def __init__(self): - Element.__init__(self, "DOCUMENT_ROOT") - - class DocumentFragment(Element): - def __init__(self): - Element.__init__(self, "DOCUMENT_FRAGMENT") - - def testSerializer(element): - rv = [] - - def serializeElement(element, indent=0): - if not(hasattr(element, "tag")): - element = element.getroot() - if element.tag == "<!DOCTYPE>": - if element.get("publicId") or element.get("systemId"): - publicId = element.get("publicId") or "" - systemId = element.get("systemId") or "" - rv.append("""<!DOCTYPE %s "%s" "%s">""" % - (element.text, publicId, systemId)) - else: - rv.append("<!DOCTYPE %s>" % (element.text,)) - elif element.tag == "DOCUMENT_ROOT": - rv.append("#document") - if element.text is not None: - rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text)) - if element.tail is not None: - raise TypeError("Document node cannot have tail") - if hasattr(element, "attrib") and len(element.attrib): - raise TypeError("Document node cannot have attributes") - elif element.tag == ElementTreeCommentType: - rv.append("|%s<!-- %s -->" % (' ' * indent, element.text)) - else: - assert isinstance(element.tag, text_type), \ - "Expected unicode, got %s, %s" % (type(element.tag), element.tag) - nsmatch = tag_regexp.match(element.tag) - - if nsmatch is None: - name = element.tag - else: - ns, name = nsmatch.groups() - prefix = constants.prefixes[ns] - name = "%s %s" % (prefix, name) - rv.append("|%s<%s>" % (' ' * indent, name)) - - if hasattr(element, "attrib"): - attributes = [] - for name, value in element.attrib.items(): - nsmatch = tag_regexp.match(name) - if nsmatch is not None: - ns, name = nsmatch.groups() - prefix = constants.prefixes[ns] - attr_string = "%s %s" % (prefix, name) - else: - attr_string = name - attributes.append((attr_string, value)) - - for name, value in sorted(attributes): - rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value)) - if element.text: - rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text)) - indent += 2 - for child in element: - serializeElement(child, indent) - if element.tail: - rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail)) - serializeElement(element, 0) - 
- return "\n".join(rv) - - def tostring(element): # pylint:disable=unused-variable - """Serialize an element and its child nodes to a string""" - rv = [] - filter = _ihatexml.InfosetFilter() - - def serializeElement(element): - if isinstance(element, ElementTree.ElementTree): - element = element.getroot() - - if element.tag == "<!DOCTYPE>": - if element.get("publicId") or element.get("systemId"): - publicId = element.get("publicId") or "" - systemId = element.get("systemId") or "" - rv.append("""<!DOCTYPE %s PUBLIC "%s" "%s">""" % - (element.text, publicId, systemId)) - else: - rv.append("<!DOCTYPE %s>" % (element.text,)) - elif element.tag == "DOCUMENT_ROOT": - if element.text is not None: - rv.append(element.text) - if element.tail is not None: - raise TypeError("Document node cannot have tail") - if hasattr(element, "attrib") and len(element.attrib): - raise TypeError("Document node cannot have attributes") - - for child in element: - serializeElement(child) - - elif element.tag == ElementTreeCommentType: - rv.append("<!--%s-->" % (element.text,)) - else: - # This is assumed to be an ordinary element - if not element.attrib: - rv.append("<%s>" % (filter.fromXmlName(element.tag),)) - else: - attr = " ".join(["%s=\"%s\"" % ( - filter.fromXmlName(name), value) - for name, value in element.attrib.items()]) - rv.append("<%s %s>" % (element.tag, attr)) - if element.text: - rv.append(element.text) - - for child in element: - serializeElement(child) - - rv.append("</%s>" % (element.tag,)) - - if element.tail: - rv.append(element.tail) - - serializeElement(element) - - return "".join(rv) - - class TreeBuilder(base.TreeBuilder): # pylint:disable=unused-variable - documentClass = Document - doctypeClass = DocumentType - elementClass = Element - commentClass = Comment - fragmentClass = DocumentFragment - implementation = ElementTreeImplementation - - def testSerializer(self, element): - return testSerializer(element) - - def getDocument(self): - if fullTree: - return self.document._element - else: - if self.defaultNamespace is not None: - return self.document._element.find( - "{%s}html" % self.defaultNamespace) - else: - return self.document._element.find("html") - - def getFragment(self): - return base.TreeBuilder.getFragment(self)._element - - return locals() - - -getETreeModule = moduleFactoryFactory(getETreeBuilder) diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.py b/env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.py deleted file mode 100644 index f037759f42e74a88bb685679d2c3f574d186521e..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.py +++ /dev/null @@ -1,392 +0,0 @@ -"""Module for supporting the lxml.etree library. The idea here is to use as much -of the native library as possible, without using fragile hacks like custom element -names that break between releases. The downside of this is that we cannot represent -all possible trees; specifically the following are known to cause problems: - -Text or comments as siblings of the root element -Doctypes with no name - -When any of these things occur, we emit a DataLossWarning -""" - -from __future__ import absolute_import, division, unicode_literals -# pylint:disable=protected-access - -import warnings -import re -import sys - -try: - from collections.abc import MutableMapping -except ImportError: - from collections import MutableMapping - -from . import base -from ..constants import DataLossWarning -from .. 
import constants -from . import etree as etree_builders -from .. import _ihatexml - -import lxml.etree as etree -from pip._vendor.six import PY3, binary_type - - -fullTree = True -tag_regexp = re.compile("{([^}]*)}(.*)") - -comment_type = etree.Comment("asd").tag - - -class DocumentType(object): - def __init__(self, name, publicId, systemId): - self.name = name - self.publicId = publicId - self.systemId = systemId - - -class Document(object): - def __init__(self): - self._elementTree = None - self._childNodes = [] - - def appendChild(self, element): - last = self._elementTree.getroot() - for last in self._elementTree.getroot().itersiblings(): - pass - - last.addnext(element._element) - - def _getChildNodes(self): - return self._childNodes - - childNodes = property(_getChildNodes) - - -def testSerializer(element): - rv = [] - infosetFilter = _ihatexml.InfosetFilter(preventDoubleDashComments=True) - - def serializeElement(element, indent=0): - if not hasattr(element, "tag"): - if hasattr(element, "getroot"): - # Full tree case - rv.append("#document") - if element.docinfo.internalDTD: - if not (element.docinfo.public_id or - element.docinfo.system_url): - dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name - else: - dtd_str = """<!DOCTYPE %s "%s" "%s">""" % ( - element.docinfo.root_name, - element.docinfo.public_id, - element.docinfo.system_url) - rv.append("|%s%s" % (' ' * (indent + 2), dtd_str)) - next_element = element.getroot() - while next_element.getprevious() is not None: - next_element = next_element.getprevious() - while next_element is not None: - serializeElement(next_element, indent + 2) - next_element = next_element.getnext() - elif isinstance(element, str) or isinstance(element, bytes): - # Text in a fragment - assert isinstance(element, str) or sys.version_info[0] == 2 - rv.append("|%s\"%s\"" % (' ' * indent, element)) - else: - # Fragment case - rv.append("#document-fragment") - for next_element in element: - serializeElement(next_element, indent + 2) - elif element.tag == comment_type: - rv.append("|%s<!-- %s -->" % (' ' * indent, element.text)) - if hasattr(element, "tail") and element.tail: - rv.append("|%s\"%s\"" % (' ' * indent, element.tail)) - else: - assert isinstance(element, etree._Element) - nsmatch = etree_builders.tag_regexp.match(element.tag) - if nsmatch is not None: - ns = nsmatch.group(1) - tag = nsmatch.group(2) - prefix = constants.prefixes[ns] - rv.append("|%s<%s %s>" % (' ' * indent, prefix, - infosetFilter.fromXmlName(tag))) - else: - rv.append("|%s<%s>" % (' ' * indent, - infosetFilter.fromXmlName(element.tag))) - - if hasattr(element, "attrib"): - attributes = [] - for name, value in element.attrib.items(): - nsmatch = tag_regexp.match(name) - if nsmatch is not None: - ns, name = nsmatch.groups() - name = infosetFilter.fromXmlName(name) - prefix = constants.prefixes[ns] - attr_string = "%s %s" % (prefix, name) - else: - attr_string = infosetFilter.fromXmlName(name) - attributes.append((attr_string, value)) - - for name, value in sorted(attributes): - rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value)) - - if element.text: - rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text)) - indent += 2 - for child in element: - serializeElement(child, indent) - if hasattr(element, "tail") and element.tail: - rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail)) - serializeElement(element, 0) - - return "\n".join(rv) - - -def tostring(element): - """Serialize an element and its child nodes to a string""" - rv = [] - - def 
serializeElement(element): - if not hasattr(element, "tag"): - if element.docinfo.internalDTD: - if element.docinfo.doctype: - dtd_str = element.docinfo.doctype - else: - dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name - rv.append(dtd_str) - serializeElement(element.getroot()) - - elif element.tag == comment_type: - rv.append("<!--%s-->" % (element.text,)) - - else: - # This is assumed to be an ordinary element - if not element.attrib: - rv.append("<%s>" % (element.tag,)) - else: - attr = " ".join(["%s=\"%s\"" % (name, value) - for name, value in element.attrib.items()]) - rv.append("<%s %s>" % (element.tag, attr)) - if element.text: - rv.append(element.text) - - for child in element: - serializeElement(child) - - rv.append("</%s>" % (element.tag,)) - - if hasattr(element, "tail") and element.tail: - rv.append(element.tail) - - serializeElement(element) - - return "".join(rv) - - -class TreeBuilder(base.TreeBuilder): - documentClass = Document - doctypeClass = DocumentType - elementClass = None - commentClass = None - fragmentClass = Document - implementation = etree - - def __init__(self, namespaceHTMLElements, fullTree=False): - builder = etree_builders.getETreeModule(etree, fullTree=fullTree) - infosetFilter = self.infosetFilter = _ihatexml.InfosetFilter(preventDoubleDashComments=True) - self.namespaceHTMLElements = namespaceHTMLElements - - class Attributes(MutableMapping): - def __init__(self, element): - self._element = element - - def _coerceKey(self, key): - if isinstance(key, tuple): - name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1])) - else: - name = infosetFilter.coerceAttribute(key) - return name - - def __getitem__(self, key): - value = self._element._element.attrib[self._coerceKey(key)] - if not PY3 and isinstance(value, binary_type): - value = value.decode("ascii") - return value - - def __setitem__(self, key, value): - self._element._element.attrib[self._coerceKey(key)] = value - - def __delitem__(self, key): - del self._element._element.attrib[self._coerceKey(key)] - - def __iter__(self): - return iter(self._element._element.attrib) - - def __len__(self): - return len(self._element._element.attrib) - - def clear(self): - return self._element._element.attrib.clear() - - class Element(builder.Element): - def __init__(self, name, namespace): - name = infosetFilter.coerceElement(name) - builder.Element.__init__(self, name, namespace=namespace) - self._attributes = Attributes(self) - - def _setName(self, name): - self._name = infosetFilter.coerceElement(name) - self._element.tag = self._getETreeTag( - self._name, self._namespace) - - def _getName(self): - return infosetFilter.fromXmlName(self._name) - - name = property(_getName, _setName) - - def _getAttributes(self): - return self._attributes - - def _setAttributes(self, value): - attributes = self.attributes - attributes.clear() - attributes.update(value) - - attributes = property(_getAttributes, _setAttributes) - - def insertText(self, data, insertBefore=None): - data = infosetFilter.coerceCharacters(data) - builder.Element.insertText(self, data, insertBefore) - - def cloneNode(self): - element = type(self)(self.name, self.namespace) - if self._element.attrib: - element._element.attrib.update(self._element.attrib) - return element - - class Comment(builder.Comment): - def __init__(self, data): - data = infosetFilter.coerceComment(data) - builder.Comment.__init__(self, data) - - def _setData(self, data): - data = infosetFilter.coerceComment(data) - self._element.text = data - - def _getData(self): - 
return self._element.text - - data = property(_getData, _setData) - - self.elementClass = Element - self.commentClass = Comment - # self.fragmentClass = builder.DocumentFragment - base.TreeBuilder.__init__(self, namespaceHTMLElements) - - def reset(self): - base.TreeBuilder.reset(self) - self.insertComment = self.insertCommentInitial - self.initial_comments = [] - self.doctype = None - - def testSerializer(self, element): - return testSerializer(element) - - def getDocument(self): - if fullTree: - return self.document._elementTree - else: - return self.document._elementTree.getroot() - - def getFragment(self): - fragment = [] - element = self.openElements[0]._element - if element.text: - fragment.append(element.text) - fragment.extend(list(element)) - if element.tail: - fragment.append(element.tail) - return fragment - - def insertDoctype(self, token): - name = token["name"] - publicId = token["publicId"] - systemId = token["systemId"] - - if not name: - warnings.warn("lxml cannot represent empty doctype", DataLossWarning) - self.doctype = None - else: - coercedName = self.infosetFilter.coerceElement(name) - if coercedName != name: - warnings.warn("lxml cannot represent non-xml doctype", DataLossWarning) - - doctype = self.doctypeClass(coercedName, publicId, systemId) - self.doctype = doctype - - def insertCommentInitial(self, data, parent=None): - assert parent is None or parent is self.document - assert self.document._elementTree is None - self.initial_comments.append(data) - - def insertCommentMain(self, data, parent=None): - if (parent == self.document and - self.document._elementTree.getroot()[-1].tag == comment_type): - warnings.warn("lxml cannot represent adjacent comments beyond the root elements", DataLossWarning) - super(TreeBuilder, self).insertComment(data, parent) - - def insertRoot(self, token): - # Because of the way libxml2 works, it doesn't seem to be possible to - # alter information like the doctype after the tree has been parsed. 
- # Therefore we need to use the built-in parser to create our initial - # tree, after which we can add elements like normal - docStr = "" - if self.doctype: - assert self.doctype.name - docStr += "<!DOCTYPE %s" % self.doctype.name - if (self.doctype.publicId is not None or - self.doctype.systemId is not None): - docStr += (' PUBLIC "%s" ' % - (self.infosetFilter.coercePubid(self.doctype.publicId or ""))) - if self.doctype.systemId: - sysid = self.doctype.systemId - if sysid.find("'") >= 0 and sysid.find('"') >= 0: - warnings.warn("DOCTYPE system cannot contain single and double quotes", DataLossWarning) - sysid = sysid.replace("'", 'U00027') - if sysid.find("'") >= 0: - docStr += '"%s"' % sysid - else: - docStr += "'%s'" % sysid - else: - docStr += "''" - docStr += ">" - if self.doctype.name != token["name"]: - warnings.warn("lxml cannot represent doctype with a different name to the root element", DataLossWarning) - docStr += "<THIS_SHOULD_NEVER_APPEAR_PUBLICLY/>" - root = etree.fromstring(docStr) - - # Append the initial comments: - for comment_token in self.initial_comments: - comment = self.commentClass(comment_token["data"]) - root.addprevious(comment._element) - - # Create the root document and add the ElementTree to it - self.document = self.documentClass() - self.document._elementTree = root.getroottree() - - # Give the root element the right name - name = token["name"] - namespace = token.get("namespace", self.defaultNamespace) - if namespace is None: - etree_tag = name - else: - etree_tag = "{%s}%s" % (namespace, name) - root.tag = etree_tag - - # Add the root element to the internal child/open data structures - root_element = self.elementClass(name, namespace) - root_element._element = root - self.document._childNodes.append(root_element) - self.openElements.append(root_element) - - # Reset to the default insert comment function - self.insertComment = self.insertCommentMain diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/__init__.py b/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/__init__.py deleted file mode 100644 index b2d3aac3137f5d374ec35dd4bbfdb5e732fc51f0..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/__init__.py +++ /dev/null @@ -1,154 +0,0 @@ -"""A collection of modules for iterating through different kinds of -tree, generating tokens identical to those produced by the tokenizer -module. - -To create a tree walker for a new type of tree, you need to -implement a tree walker object (called TreeWalker by convention) that -implements a 'serialize' method which takes a tree as sole argument and -returns an iterator which generates tokens. -""" - -from __future__ import absolute_import, division, unicode_literals - -from .. import constants -from .._utils import default_etree - -__all__ = ["getTreeWalker", "pprint"] - -treeWalkerCache = {} - - -def getTreeWalker(treeType, implementation=None, **kwargs): - """Get a TreeWalker class for various types of tree with built-in support - - :arg str treeType: the name of the tree type required (case-insensitive). - Supported values are: - - * "dom": The xml.dom.minidom DOM implementation - * "etree": A generic walker for tree implementations exposing an - elementtree-like interface (known to work with ElementTree, - cElementTree and lxml.etree). - * "lxml": Optimized walker for lxml.etree - * "genshi": a Genshi stream - - :arg implementation: A module implementing the tree type e.g. 
- xml.etree.ElementTree or cElementTree (Currently applies to the "etree" - tree type only). - - :arg kwargs: keyword arguments passed to the etree walker--for other - walkers, this has no effect - - :returns: a TreeWalker class - - """ - - treeType = treeType.lower() - if treeType not in treeWalkerCache: - if treeType == "dom": - from . import dom - treeWalkerCache[treeType] = dom.TreeWalker - elif treeType == "genshi": - from . import genshi - treeWalkerCache[treeType] = genshi.TreeWalker - elif treeType == "lxml": - from . import etree_lxml - treeWalkerCache[treeType] = etree_lxml.TreeWalker - elif treeType == "etree": - from . import etree - if implementation is None: - implementation = default_etree - # XXX: NEVER cache here, caching is done in the etree submodule - return etree.getETreeModule(implementation, **kwargs).TreeWalker - return treeWalkerCache.get(treeType) - - -def concatenateCharacterTokens(tokens): - pendingCharacters = [] - for token in tokens: - type = token["type"] - if type in ("Characters", "SpaceCharacters"): - pendingCharacters.append(token["data"]) - else: - if pendingCharacters: - yield {"type": "Characters", "data": "".join(pendingCharacters)} - pendingCharacters = [] - yield token - if pendingCharacters: - yield {"type": "Characters", "data": "".join(pendingCharacters)} - - -def pprint(walker): - """Pretty printer for tree walkers - - Takes a TreeWalker instance and pretty prints the output of walking the tree. - - :arg walker: a TreeWalker instance - - """ - output = [] - indent = 0 - for token in concatenateCharacterTokens(walker): - type = token["type"] - if type in ("StartTag", "EmptyTag"): - # tag name - if token["namespace"] and token["namespace"] != constants.namespaces["html"]: - if token["namespace"] in constants.prefixes: - ns = constants.prefixes[token["namespace"]] - else: - ns = token["namespace"] - name = "%s %s" % (ns, token["name"]) - else: - name = token["name"] - output.append("%s<%s>" % (" " * indent, name)) - indent += 2 - # attributes (sorted for consistent ordering) - attrs = token["data"] - for (namespace, localname), value in sorted(attrs.items()): - if namespace: - if namespace in constants.prefixes: - ns = constants.prefixes[namespace] - else: - ns = namespace - name = "%s %s" % (ns, localname) - else: - name = localname - output.append("%s%s=\"%s\"" % (" " * indent, name, value)) - # self-closing - if type == "EmptyTag": - indent -= 2 - - elif type == "EndTag": - indent -= 2 - - elif type == "Comment": - output.append("%s<!-- %s -->" % (" " * indent, token["data"])) - - elif type == "Doctype": - if token["name"]: - if token["publicId"]: - output.append("""%s<!DOCTYPE %s "%s" "%s">""" % - (" " * indent, - token["name"], - token["publicId"], - token["systemId"] if token["systemId"] else "")) - elif token["systemId"]: - output.append("""%s<!DOCTYPE %s "" "%s">""" % - (" " * indent, - token["name"], - token["systemId"])) - else: - output.append("%s<!DOCTYPE %s>" % (" " * indent, - token["name"])) - else: - output.append("%s<!DOCTYPE >" % (" " * indent,)) - - elif type == "Characters": - output.append("%s\"%s\"" % (" " * indent, token["data"])) - - elif type == "SpaceCharacters": - assert False, "concatenateCharacterTokens should have got rid of all Space tokens" - - else: - raise ValueError("Unknown token type, %s" % type) - - return "\n".join(output) diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/__pycache__/__init__.cpython-39.pyc 
b/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 34fc06cf9a14d6271ded03f3177b5eb10e08e77d..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/__pycache__/base.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/__pycache__/base.cpython-39.pyc deleted file mode 100644 index bbfc8763f08952bd75c0f9e1920594a433f827ab..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/__pycache__/base.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/__pycache__/dom.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/__pycache__/dom.cpython-39.pyc deleted file mode 100644 index 1147e58ae30c8a58fdd3f93181e7639a5f5a7f41..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/__pycache__/dom.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/__pycache__/etree.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/__pycache__/etree.cpython-39.pyc deleted file mode 100644 index 46f1c0120b90088dcb6ea5059fe4074faf1dbb9c..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/__pycache__/etree.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/__pycache__/etree_lxml.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/__pycache__/etree_lxml.cpython-39.pyc deleted file mode 100644 index 01c44c8ed1b1fe27e01317fab78ef36d3185b050..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/__pycache__/etree_lxml.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/__pycache__/genshi.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/__pycache__/genshi.cpython-39.pyc deleted file mode 100644 index bb93a33e4dc2f97d74c8b7c0d44ced264c332673..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/__pycache__/genshi.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/base.py b/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/base.py deleted file mode 100644 index 80c474c4e939c149a22e811a5a1a5419313b7cc7..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/base.py +++ /dev/null @@ -1,252 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -from xml.dom import Node -from ..constants import namespaces, voidElements, spaceCharacters - -__all__ = ["DOCUMENT", "DOCTYPE", "TEXT", "ELEMENT", "COMMENT", "ENTITY", "UNKNOWN", - "TreeWalker", "NonRecursiveTreeWalker"] - -DOCUMENT = Node.DOCUMENT_NODE -DOCTYPE = Node.DOCUMENT_TYPE_NODE -TEXT = Node.TEXT_NODE -ELEMENT = Node.ELEMENT_NODE -COMMENT = Node.COMMENT_NODE -ENTITY = Node.ENTITY_NODE -UNKNOWN = "<#UNKNOWN#>" - -spaceCharacters = "".join(spaceCharacters) - - -class TreeWalker(object): - """Walks a tree yielding tokens - - Tokens are dicts that all have a ``type`` field specifying the type of the - token. 
- - """ - def __init__(self, tree): - """Creates a TreeWalker - - :arg tree: the tree to walk - - """ - self.tree = tree - - def __iter__(self): - raise NotImplementedError - - def error(self, msg): - """Generates an error token with the given message - - :arg msg: the error message - - :returns: SerializeError token - - """ - return {"type": "SerializeError", "data": msg} - - def emptyTag(self, namespace, name, attrs, hasChildren=False): - """Generates an EmptyTag token - - :arg namespace: the namespace of the token--can be ``None`` - - :arg name: the name of the element - - :arg attrs: the attributes of the element as a dict - - :arg hasChildren: whether or not to yield a SerializationError because - this tag shouldn't have children - - :returns: EmptyTag token - - """ - yield {"type": "EmptyTag", "name": name, - "namespace": namespace, - "data": attrs} - if hasChildren: - yield self.error("Void element has children") - - def startTag(self, namespace, name, attrs): - """Generates a StartTag token - - :arg namespace: the namespace of the token--can be ``None`` - - :arg name: the name of the element - - :arg attrs: the attributes of the element as a dict - - :returns: StartTag token - - """ - return {"type": "StartTag", - "name": name, - "namespace": namespace, - "data": attrs} - - def endTag(self, namespace, name): - """Generates an EndTag token - - :arg namespace: the namespace of the token--can be ``None`` - - :arg name: the name of the element - - :returns: EndTag token - - """ - return {"type": "EndTag", - "name": name, - "namespace": namespace} - - def text(self, data): - """Generates SpaceCharacters and Characters tokens - - Depending on what's in the data, this generates one or more - ``SpaceCharacters`` and ``Characters`` tokens. - - For example: - - >>> from html5lib.treewalkers.base import TreeWalker - >>> # Give it an empty tree just so it instantiates - >>> walker = TreeWalker([]) - >>> list(walker.text('')) - [] - >>> list(walker.text(' ')) - [{u'data': ' ', u'type': u'SpaceCharacters'}] - >>> list(walker.text(' abc ')) # doctest: +NORMALIZE_WHITESPACE - [{u'data': ' ', u'type': u'SpaceCharacters'}, - {u'data': u'abc', u'type': u'Characters'}, - {u'data': u' ', u'type': u'SpaceCharacters'}] - - :arg data: the text data - - :returns: one or more ``SpaceCharacters`` and ``Characters`` tokens - - """ - data = data - middle = data.lstrip(spaceCharacters) - left = data[:len(data) - len(middle)] - if left: - yield {"type": "SpaceCharacters", "data": left} - data = middle - middle = data.rstrip(spaceCharacters) - right = data[len(middle):] - if middle: - yield {"type": "Characters", "data": middle} - if right: - yield {"type": "SpaceCharacters", "data": right} - - def comment(self, data): - """Generates a Comment token - - :arg data: the comment - - :returns: Comment token - - """ - return {"type": "Comment", "data": data} - - def doctype(self, name, publicId=None, systemId=None): - """Generates a Doctype token - - :arg name: - - :arg publicId: - - :arg systemId: - - :returns: the Doctype token - - """ - return {"type": "Doctype", - "name": name, - "publicId": publicId, - "systemId": systemId} - - def entity(self, name): - """Generates an Entity token - - :arg name: the entity name - - :returns: an Entity token - - """ - return {"type": "Entity", "name": name} - - def unknown(self, nodeType): - """Handles unknown node types""" - return self.error("Unknown node type: " + nodeType) - - -class NonRecursiveTreeWalker(TreeWalker): - def getNodeDetails(self, node): - raise 
NotImplementedError - - def getFirstChild(self, node): - raise NotImplementedError - - def getNextSibling(self, node): - raise NotImplementedError - - def getParentNode(self, node): - raise NotImplementedError - - def __iter__(self): - currentNode = self.tree - while currentNode is not None: - details = self.getNodeDetails(currentNode) - type, details = details[0], details[1:] - hasChildren = False - - if type == DOCTYPE: - yield self.doctype(*details) - - elif type == TEXT: - for token in self.text(*details): - yield token - - elif type == ELEMENT: - namespace, name, attributes, hasChildren = details - if (not namespace or namespace == namespaces["html"]) and name in voidElements: - for token in self.emptyTag(namespace, name, attributes, - hasChildren): - yield token - hasChildren = False - else: - yield self.startTag(namespace, name, attributes) - - elif type == COMMENT: - yield self.comment(details[0]) - - elif type == ENTITY: - yield self.entity(details[0]) - - elif type == DOCUMENT: - hasChildren = True - - else: - yield self.unknown(details[0]) - - if hasChildren: - firstChild = self.getFirstChild(currentNode) - else: - firstChild = None - - if firstChild is not None: - currentNode = firstChild - else: - while currentNode is not None: - details = self.getNodeDetails(currentNode) - type, details = details[0], details[1:] - if type == ELEMENT: - namespace, name, attributes, hasChildren = details - if (namespace and namespace != namespaces["html"]) or name not in voidElements: - yield self.endTag(namespace, name) - if self.tree is currentNode: - currentNode = None - break - nextSibling = self.getNextSibling(currentNode) - if nextSibling is not None: - currentNode = nextSibling - break - else: - currentNode = self.getParentNode(currentNode) diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/dom.py b/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/dom.py deleted file mode 100644 index b0c89b001fd3b60511734c31e452c1d2053468d0..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/dom.py +++ /dev/null @@ -1,43 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -from xml.dom import Node - -from . 
import base - - -class TreeWalker(base.NonRecursiveTreeWalker): - def getNodeDetails(self, node): - if node.nodeType == Node.DOCUMENT_TYPE_NODE: - return base.DOCTYPE, node.name, node.publicId, node.systemId - - elif node.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE): - return base.TEXT, node.nodeValue - - elif node.nodeType == Node.ELEMENT_NODE: - attrs = {} - for attr in list(node.attributes.keys()): - attr = node.getAttributeNode(attr) - if attr.namespaceURI: - attrs[(attr.namespaceURI, attr.localName)] = attr.value - else: - attrs[(None, attr.name)] = attr.value - return (base.ELEMENT, node.namespaceURI, node.nodeName, - attrs, node.hasChildNodes()) - - elif node.nodeType == Node.COMMENT_NODE: - return base.COMMENT, node.nodeValue - - elif node.nodeType in (Node.DOCUMENT_NODE, Node.DOCUMENT_FRAGMENT_NODE): - return (base.DOCUMENT,) - - else: - return base.UNKNOWN, node.nodeType - - def getFirstChild(self, node): - return node.firstChild - - def getNextSibling(self, node): - return node.nextSibling - - def getParentNode(self, node): - return node.parentNode diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/etree.py b/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/etree.py deleted file mode 100644 index 837b27ec486924eb9ccef53c6a5d578bd787aefd..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/etree.py +++ /dev/null @@ -1,131 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -from collections import OrderedDict -import re - -from pip._vendor.six import string_types - -from . import base -from .._utils import moduleFactoryFactory - -tag_regexp = re.compile("{([^}]*)}(.*)") - - -def getETreeBuilder(ElementTreeImplementation): - ElementTree = ElementTreeImplementation - ElementTreeCommentType = ElementTree.Comment("asd").tag - - class TreeWalker(base.NonRecursiveTreeWalker): # pylint:disable=unused-variable - """Given the particular ElementTree representation, this implementation, - to avoid using recursion, returns "nodes" as tuples with the following - content: - - 1. The current element - - 2. The index of the element relative to its parent - - 3. A stack of ancestor elements - - 4. 
A flag "text", "tail" or None to indicate if the current node is a - text node; either the text or tail of the current element (1) - """ - def getNodeDetails(self, node): - if isinstance(node, tuple): # It might be the root Element - elt, _, _, flag = node - if flag in ("text", "tail"): - return base.TEXT, getattr(elt, flag) - else: - node = elt - - if not(hasattr(node, "tag")): - node = node.getroot() - - if node.tag in ("DOCUMENT_ROOT", "DOCUMENT_FRAGMENT"): - return (base.DOCUMENT,) - - elif node.tag == "<!DOCTYPE>": - return (base.DOCTYPE, node.text, - node.get("publicId"), node.get("systemId")) - - elif node.tag == ElementTreeCommentType: - return base.COMMENT, node.text - - else: - assert isinstance(node.tag, string_types), type(node.tag) - # This is assumed to be an ordinary element - match = tag_regexp.match(node.tag) - if match: - namespace, tag = match.groups() - else: - namespace = None - tag = node.tag - attrs = OrderedDict() - for name, value in list(node.attrib.items()): - match = tag_regexp.match(name) - if match: - attrs[(match.group(1), match.group(2))] = value - else: - attrs[(None, name)] = value - return (base.ELEMENT, namespace, tag, - attrs, len(node) or node.text) - - def getFirstChild(self, node): - if isinstance(node, tuple): - element, key, parents, flag = node - else: - element, key, parents, flag = node, None, [], None - - if flag in ("text", "tail"): - return None - else: - if element.text: - return element, key, parents, "text" - elif len(element): - parents.append(element) - return element[0], 0, parents, None - else: - return None - - def getNextSibling(self, node): - if isinstance(node, tuple): - element, key, parents, flag = node - else: - return None - - if flag == "text": - if len(element): - parents.append(element) - return element[0], 0, parents, None - else: - return None - else: - if element.tail and flag != "tail": - return element, key, parents, "tail" - elif key < len(parents[-1]) - 1: - return parents[-1][key + 1], key + 1, parents, None - else: - return None - - def getParentNode(self, node): - if isinstance(node, tuple): - element, key, parents, flag = node - else: - return None - - if flag == "text": - if not parents: - return element - else: - return element, key, parents, None - else: - parent = parents.pop() - if not parents: - return parent - else: - assert list(parents[-1]).count(parent) == 1 - return parent, list(parents[-1]).index(parent), parents, None - - return locals() - - -getETreeModule = moduleFactoryFactory(getETreeBuilder) diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.py b/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.py deleted file mode 100644 index c56af390fe250c1048036375fff340db5d2807a8..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.py +++ /dev/null @@ -1,215 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals -from pip._vendor.six import text_type - -from collections import OrderedDict - -from lxml import etree -from ..treebuilders.etree import tag_regexp - -from . import base - -from .. 
import _ihatexml - - -def ensure_str(s): - if s is None: - return None - elif isinstance(s, text_type): - return s - else: - return s.decode("ascii", "strict") - - -class Root(object): - def __init__(self, et): - self.elementtree = et - self.children = [] - - try: - if et.docinfo.internalDTD: - self.children.append(Doctype(self, - ensure_str(et.docinfo.root_name), - ensure_str(et.docinfo.public_id), - ensure_str(et.docinfo.system_url))) - except AttributeError: - pass - - try: - node = et.getroot() - except AttributeError: - node = et - - while node.getprevious() is not None: - node = node.getprevious() - while node is not None: - self.children.append(node) - node = node.getnext() - - self.text = None - self.tail = None - - def __getitem__(self, key): - return self.children[key] - - def getnext(self): - return None - - def __len__(self): - return 1 - - -class Doctype(object): - def __init__(self, root_node, name, public_id, system_id): - self.root_node = root_node - self.name = name - self.public_id = public_id - self.system_id = system_id - - self.text = None - self.tail = None - - def getnext(self): - return self.root_node.children[1] - - -class FragmentRoot(Root): - def __init__(self, children): - self.children = [FragmentWrapper(self, child) for child in children] - self.text = self.tail = None - - def getnext(self): - return None - - -class FragmentWrapper(object): - def __init__(self, fragment_root, obj): - self.root_node = fragment_root - self.obj = obj - if hasattr(self.obj, 'text'): - self.text = ensure_str(self.obj.text) - else: - self.text = None - if hasattr(self.obj, 'tail'): - self.tail = ensure_str(self.obj.tail) - else: - self.tail = None - - def __getattr__(self, name): - return getattr(self.obj, name) - - def getnext(self): - siblings = self.root_node.children - idx = siblings.index(self) - if idx < len(siblings) - 1: - return siblings[idx + 1] - else: - return None - - def __getitem__(self, key): - return self.obj[key] - - def __bool__(self): - return bool(self.obj) - - def getparent(self): - return None - - def __str__(self): - return str(self.obj) - - def __unicode__(self): - return str(self.obj) - - def __len__(self): - return len(self.obj) - - -class TreeWalker(base.NonRecursiveTreeWalker): - def __init__(self, tree): - # pylint:disable=redefined-variable-type - if isinstance(tree, list): - self.fragmentChildren = set(tree) - tree = FragmentRoot(tree) - else: - self.fragmentChildren = set() - tree = Root(tree) - base.NonRecursiveTreeWalker.__init__(self, tree) - self.filter = _ihatexml.InfosetFilter() - - def getNodeDetails(self, node): - if isinstance(node, tuple): # Text node - node, key = node - assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key - return base.TEXT, ensure_str(getattr(node, key)) - - elif isinstance(node, Root): - return (base.DOCUMENT,) - - elif isinstance(node, Doctype): - return base.DOCTYPE, node.name, node.public_id, node.system_id - - elif isinstance(node, FragmentWrapper) and not hasattr(node, "tag"): - return base.TEXT, ensure_str(node.obj) - - elif node.tag == etree.Comment: - return base.COMMENT, ensure_str(node.text) - - elif node.tag == etree.Entity: - return base.ENTITY, ensure_str(node.text)[1:-1] # strip &; - - else: - # This is assumed to be an ordinary element - match = tag_regexp.match(ensure_str(node.tag)) - if match: - namespace, tag = match.groups() - else: - namespace = None - tag = ensure_str(node.tag) - attrs = OrderedDict() - for name, value in list(node.attrib.items()): - name = ensure_str(name) 
- value = ensure_str(value) - match = tag_regexp.match(name) - if match: - attrs[(match.group(1), match.group(2))] = value - else: - attrs[(None, name)] = value - return (base.ELEMENT, namespace, self.filter.fromXmlName(tag), - attrs, len(node) > 0 or node.text) - - def getFirstChild(self, node): - assert not isinstance(node, tuple), "Text nodes have no children" - - assert len(node) or node.text, "Node has no children" - if node.text: - return (node, "text") - else: - return node[0] - - def getNextSibling(self, node): - if isinstance(node, tuple): # Text node - node, key = node - assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key - if key == "text": - # XXX: we cannot use a "bool(node) and node[0] or None" construct here - # because node[0] might evaluate to False if it has no child element - if len(node): - return node[0] - else: - return None - else: # tail - return node.getnext() - - return (node, "tail") if node.tail else node.getnext() - - def getParentNode(self, node): - if isinstance(node, tuple): # Text node - node, key = node - assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key - if key == "text": - return node - # else: fallback to "normal" processing - elif node in self.fragmentChildren: - return None - - return node.getparent() diff --git a/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/genshi.py b/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/genshi.py deleted file mode 100644 index 7483be27d4d24f845e56b6954ee63eec730c00aa..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/genshi.py +++ /dev/null @@ -1,69 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -from genshi.core import QName -from genshi.core import START, END, XML_NAMESPACE, DOCTYPE, TEXT -from genshi.core import START_NS, END_NS, START_CDATA, END_CDATA, PI, COMMENT - -from . import base - -from ..constants import voidElements, namespaces - - -class TreeWalker(base.TreeWalker): - def __iter__(self): - # Buffer the events so we can pass in the following one - previous = None - for event in self.tree: - if previous is not None: - for token in self.tokens(previous, event): - yield token - previous = event - - # Don't forget the final event! 
- if previous is not None: - for token in self.tokens(previous, None): - yield token - - def tokens(self, event, next): - kind, data, _ = event - if kind == START: - tag, attribs = data - name = tag.localname - namespace = tag.namespace - converted_attribs = {} - for k, v in attribs: - if isinstance(k, QName): - converted_attribs[(k.namespace, k.localname)] = v - else: - converted_attribs[(None, k)] = v - - if namespace == namespaces["html"] and name in voidElements: - for token in self.emptyTag(namespace, name, converted_attribs, - not next or next[0] != END or - next[1] != tag): - yield token - else: - yield self.startTag(namespace, name, converted_attribs) - - elif kind == END: - name = data.localname - namespace = data.namespace - if namespace != namespaces["html"] or name not in voidElements: - yield self.endTag(namespace, name) - - elif kind == COMMENT: - yield self.comment(data) - - elif kind == TEXT: - for token in self.text(data): - yield token - - elif kind == DOCTYPE: - yield self.doctype(*data) - - elif kind in (XML_NAMESPACE, DOCTYPE, START_NS, END_NS, - START_CDATA, END_CDATA, PI): - pass - - else: - yield self.unknown(kind) diff --git a/env/Lib/site-packages/pip/_vendor/idna/__init__.py b/env/Lib/site-packages/pip/_vendor/idna/__init__.py index 847bf9354781fddc44ed2f47055a67c025972fd4..a40eeafcc914108ca79c5d83d6e81da1b29c6e80 100644 --- a/env/Lib/site-packages/pip/_vendor/idna/__init__.py +++ b/env/Lib/site-packages/pip/_vendor/idna/__init__.py @@ -1,2 +1,44 @@ from .package_data import __version__ -from .core import * +from .core import ( + IDNABidiError, + IDNAError, + InvalidCodepoint, + InvalidCodepointContext, + alabel, + check_bidi, + check_hyphen_ok, + check_initial_combiner, + check_label, + check_nfc, + decode, + encode, + ulabel, + uts46_remap, + valid_contextj, + valid_contexto, + valid_label_length, + valid_string_length, +) +from .intranges import intranges_contain + +__all__ = [ + "IDNABidiError", + "IDNAError", + "InvalidCodepoint", + "InvalidCodepointContext", + "alabel", + "check_bidi", + "check_hyphen_ok", + "check_initial_combiner", + "check_label", + "check_nfc", + "decode", + "encode", + "intranges_contain", + "ulabel", + "uts46_remap", + "valid_contextj", + "valid_contexto", + "valid_label_length", + "valid_string_length", +] diff --git a/env/Lib/site-packages/pip/_vendor/idna/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/idna/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 9153748e3386dc6e8428f5c6eeee01482af68441..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/idna/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/idna/__pycache__/codec.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/idna/__pycache__/codec.cpython-39.pyc deleted file mode 100644 index 8842b410416ab5da9865a9fe1ff35997e2fb9a68..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/idna/__pycache__/codec.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/idna/__pycache__/compat.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/idna/__pycache__/compat.cpython-39.pyc deleted file mode 100644 index aa878326e9d1859aea85e744013129b15ff77d1d..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/idna/__pycache__/compat.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/idna/__pycache__/core.cpython-39.pyc 
b/env/Lib/site-packages/pip/_vendor/idna/__pycache__/core.cpython-39.pyc deleted file mode 100644 index 92702b5b1057f0b3f71546eef5f22b964953fee6..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/idna/__pycache__/core.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/idna/__pycache__/idnadata.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/idna/__pycache__/idnadata.cpython-39.pyc deleted file mode 100644 index 50dcaa5a94151ca7e0d72d83f83aff1d52423aa6..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/idna/__pycache__/idnadata.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/idna/__pycache__/intranges.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/idna/__pycache__/intranges.cpython-39.pyc deleted file mode 100644 index 3378c2c3ec717fcdb9763c7d652400034fdcafed..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/idna/__pycache__/intranges.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/idna/__pycache__/package_data.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/idna/__pycache__/package_data.cpython-39.pyc deleted file mode 100644 index 4136077de8dc3d40cfbeca722e5029a3bfb8080a..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/idna/__pycache__/package_data.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/idna/__pycache__/uts46data.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/idna/__pycache__/uts46data.cpython-39.pyc deleted file mode 100644 index 59e3992c8093fb82c859a5e4cae20fd07c8563ea..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/idna/__pycache__/uts46data.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/idna/codec.py b/env/Lib/site-packages/pip/_vendor/idna/codec.py index 30fe72fbd402f3fc38dbba8bf4074fe5f7c1e62f..1ca9ba62c208527b796b49306f4b8c95eb868a51 100644 --- a/env/Lib/site-packages/pip/_vendor/idna/codec.py +++ b/env/Lib/site-packages/pip/_vendor/idna/codec.py @@ -1,23 +1,22 @@ from .core import encode, decode, alabel, ulabel, IDNAError import codecs import re +from typing import Tuple, Optional _unicode_dots_re = re.compile('[\u002e\u3002\uff0e\uff61]') class Codec(codecs.Codec): - def encode(self, data, errors='strict'): - + def encode(self, data: str, errors: str = 'strict') -> Tuple[bytes, int]: if errors != 'strict': raise IDNAError('Unsupported error handling \"{}\"'.format(errors)) if not data: - return "", 0 + return b"", 0 return encode(data), len(data) - def decode(self, data, errors='strict'): - + def decode(self, data: bytes, errors: str = 'strict') -> Tuple[str, int]: if errors != 'strict': raise IDNAError('Unsupported error handling \"{}\"'.format(errors)) @@ -27,12 +26,12 @@ class Codec(codecs.Codec): return decode(data), len(data) class IncrementalEncoder(codecs.BufferedIncrementalEncoder): - def _buffer_encode(self, data, errors, final): + def _buffer_encode(self, data: str, errors: str, final: bool) -> Tuple[str, int]: # type: ignore if errors != 'strict': raise IDNAError('Unsupported error handling \"{}\"'.format(errors)) if not data: - return ('', 0) + return "", 0 labels = _unicode_dots_re.split(data) trailing_dot = '' @@ -55,12 +54,12 @@ class IncrementalEncoder(codecs.BufferedIncrementalEncoder): size += len(label) # Join with U+002E - result = '.'.join(result) + trailing_dot + 
result_str = '.'.join(result) + trailing_dot # type: ignore size += len(trailing_dot) - return (result, size) + return result_str, size class IncrementalDecoder(codecs.BufferedIncrementalDecoder): - def _buffer_decode(self, data, errors, final): + def _buffer_decode(self, data: str, errors: str, final: bool) -> Tuple[str, int]: # type: ignore if errors != 'strict': raise IDNAError('Unsupported error handling \"{}\"'.format(errors)) @@ -87,22 +86,25 @@ class IncrementalDecoder(codecs.BufferedIncrementalDecoder): size += 1 size += len(label) - result = '.'.join(result) + trailing_dot + result_str = '.'.join(result) + trailing_dot size += len(trailing_dot) - return (result, size) + return (result_str, size) class StreamWriter(Codec, codecs.StreamWriter): pass + class StreamReader(Codec, codecs.StreamReader): pass -def getregentry(): + +def getregentry() -> codecs.CodecInfo: + # Compatibility as a search_function for codecs.register() return codecs.CodecInfo( name='idna', - encode=Codec().encode, - decode=Codec().decode, + encode=Codec().encode, # type: ignore + decode=Codec().decode, # type: ignore incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamwriter=StreamWriter, diff --git a/env/Lib/site-packages/pip/_vendor/idna/compat.py b/env/Lib/site-packages/pip/_vendor/idna/compat.py index 2e622d6f942cc2c155c8bd000b7aa5d564233563..786e6bda63699b72d588ba91dd73df017570aee5 100644 --- a/env/Lib/site-packages/pip/_vendor/idna/compat.py +++ b/env/Lib/site-packages/pip/_vendor/idna/compat.py @@ -1,12 +1,13 @@ from .core import * from .codec import * +from typing import Any, Union -def ToASCII(label): +def ToASCII(label: str) -> bytes: return encode(label) -def ToUnicode(label): +def ToUnicode(label: Union[bytes, bytearray]) -> str: return decode(label) -def nameprep(s): +def nameprep(s: Any) -> None: raise NotImplementedError('IDNA 2008 does not utilise nameprep protocol') diff --git a/env/Lib/site-packages/pip/_vendor/idna/core.py b/env/Lib/site-packages/pip/_vendor/idna/core.py index 2c193d631cde60a1324cb201724976f529a6c964..4f3003711020eac05ef5a19ab29ba5670d89f642 100644 --- a/env/Lib/site-packages/pip/_vendor/idna/core.py +++ b/env/Lib/site-packages/pip/_vendor/idna/core.py @@ -2,7 +2,7 @@ from . 
import idnadata import bisect import unicodedata import re -import sys +from typing import Union, Optional from .intranges import intranges_contain _virama_combining_class = 9 @@ -29,39 +29,36 @@ class InvalidCodepointContext(IDNAError): pass -def _combining_class(cp): +def _combining_class(cp: int) -> int: v = unicodedata.combining(chr(cp)) if v == 0: if not unicodedata.name(chr(cp)): raise ValueError('Unknown character in unicodedata') return v -def _is_script(cp, script): +def _is_script(cp: str, script: str) -> bool: return intranges_contain(ord(cp), idnadata.scripts[script]) -def _punycode(s): +def _punycode(s: str) -> bytes: return s.encode('punycode') -def _unot(s): +def _unot(s: int) -> str: return 'U+{:04X}'.format(s) -def valid_label_length(label): - +def valid_label_length(label: Union[bytes, str]) -> bool: if len(label) > 63: return False return True -def valid_string_length(label, trailing_dot): - +def valid_string_length(label: Union[bytes, str], trailing_dot: bool) -> bool: if len(label) > (254 if trailing_dot else 253): return False return True -def check_bidi(label, check_ltr=False): - +def check_bidi(label: str, check_ltr: bool = False) -> bool: # Bidi rules should only be applied if string contains RTL characters bidi_label = False for (idx, cp) in enumerate(label, 1): @@ -84,7 +81,7 @@ def check_bidi(label, check_ltr=False): raise IDNABidiError('First codepoint in label {} must be directionality L, R or AL'.format(repr(label))) valid_ending = False - number_type = False + number_type = None # type: Optional[str] for (idx, cp) in enumerate(label, 1): direction = unicodedata.bidirectional(cp) @@ -120,15 +117,13 @@ def check_bidi(label, check_ltr=False): return True -def check_initial_combiner(label): - +def check_initial_combiner(label: str) -> bool: if unicodedata.category(label[0])[0] == 'M': raise IDNAError('Label begins with an illegal combining character') return True -def check_hyphen_ok(label): - +def check_hyphen_ok(label: str) -> bool: if label[2:4] == '--': raise IDNAError('Label has disallowed hyphens in 3rd and 4th position') if label[0] == '-' or label[-1] == '-': @@ -136,14 +131,12 @@ def check_hyphen_ok(label): return True -def check_nfc(label): - +def check_nfc(label: str) -> None: if unicodedata.normalize('NFC', label) != label: raise IDNAError('Label must be in Normalization Form C') -def valid_contextj(label, pos): - +def valid_contextj(label: str, pos: int) -> bool: cp_value = ord(label[pos]) if cp_value == 0x200c: @@ -186,8 +179,7 @@ def valid_contextj(label, pos): return False -def valid_contexto(label, pos, exception=False): - +def valid_contexto(label: str, pos: int, exception: bool = False) -> bool: cp_value = ord(label[pos]) if cp_value == 0x00b7: @@ -226,9 +218,10 @@ def valid_contexto(label, pos, exception=False): return False return True + return False -def check_label(label): +def check_label(label: Union[str, bytes, bytearray]) -> None: if isinstance(label, (bytes, bytearray)): label = label.decode('utf-8') if len(label) == 0: @@ -259,14 +252,13 @@ def check_label(label): check_bidi(label) -def alabel(label): - +def alabel(label: str) -> bytes: try: - label = label.encode('ascii') - ulabel(label) - if not valid_label_length(label): + label_bytes = label.encode('ascii') + ulabel(label_bytes) + if not valid_label_length(label_bytes): raise IDNAError('Label too long') - return label + return label_bytes except UnicodeEncodeError: pass @@ -275,51 +267,58 @@ def alabel(label): label = str(label) check_label(label) - label = _punycode(label) - 
label = _alabel_prefix + label + label_bytes = _punycode(label) + label_bytes = _alabel_prefix + label_bytes - if not valid_label_length(label): + if not valid_label_length(label_bytes): raise IDNAError('Label too long') - return label - + return label_bytes -def ulabel(label): +def ulabel(label: Union[str, bytes, bytearray]) -> str: if not isinstance(label, (bytes, bytearray)): try: - label = label.encode('ascii') + label_bytes = label.encode('ascii') except UnicodeEncodeError: check_label(label) return label + else: + label_bytes = label - label = label.lower() - if label.startswith(_alabel_prefix): - label = label[len(_alabel_prefix):] - if not label: + label_bytes = label_bytes.lower() + if label_bytes.startswith(_alabel_prefix): + label_bytes = label_bytes[len(_alabel_prefix):] + if not label_bytes: raise IDNAError('Malformed A-label, no Punycode eligible content found') - if label.decode('ascii')[-1] == '-': + if label_bytes.decode('ascii')[-1] == '-': raise IDNAError('A-label must not end with a hyphen') else: - check_label(label) - return label.decode('ascii') + check_label(label_bytes) + return label_bytes.decode('ascii') - label = label.decode('punycode') + try: + label = label_bytes.decode('punycode') + except UnicodeError: + raise IDNAError('Invalid A-label') check_label(label) return label -def uts46_remap(domain, std3_rules=True, transitional=False): +def uts46_remap(domain: str, std3_rules: bool = True, transitional: bool = False) -> str: """Re-map the characters in the string according to UTS46 processing.""" from .uts46data import uts46data output = '' - try: - for pos, char in enumerate(domain): - code_point = ord(char) + + for pos, char in enumerate(domain): + code_point = ord(char) + try: uts46row = uts46data[code_point if code_point < 256 else bisect.bisect_left(uts46data, (code_point, 'Z')) - 1] status = uts46row[1] - replacement = uts46row[2] if len(uts46row) == 3 else None + replacement = None # type: Optional[str] + if len(uts46row) == 3: + replacement = uts46row[2] # type: ignore if (status == 'V' or (status == 'D' and not transitional) or (status == '3' and not std3_rules and replacement is None)): @@ -330,17 +329,20 @@ def uts46_remap(domain, std3_rules=True, transitional=False): output += replacement elif status != 'I': raise IndexError() - return unicodedata.normalize('NFC', output) - except IndexError: - raise InvalidCodepoint( - 'Codepoint {} not allowed at position {} in {}'.format( - _unot(code_point), pos + 1, repr(domain))) + except IndexError: + raise InvalidCodepoint( + 'Codepoint {} not allowed at position {} in {}'.format( + _unot(code_point), pos + 1, repr(domain))) + return unicodedata.normalize('NFC', output) -def encode(s, strict=False, uts46=False, std3_rules=False, transitional=False): +def encode(s: Union[str, bytes, bytearray], strict: bool = False, uts46: bool = False, std3_rules: bool = False, transitional: bool = False) -> bytes: if isinstance(s, (bytes, bytearray)): - s = s.decode('ascii') + try: + s = s.decode('ascii') + except UnicodeDecodeError: + raise IDNAError('should pass a unicode string to the function rather than a byte string.') if uts46: s = uts46_remap(s, std3_rules, transitional) trailing_dot = False @@ -368,10 +370,12 @@ def encode(s, strict=False, uts46=False, std3_rules=False, transitional=False): return s -def decode(s, strict=False, uts46=False, std3_rules=False): - - if isinstance(s, (bytes, bytearray)): - s = s.decode('ascii') +def decode(s: Union[str, bytes, bytearray], strict: bool = False, uts46: bool = False, 
std3_rules: bool = False) -> str: + try: + if isinstance(s, (bytes, bytearray)): + s = s.decode('ascii') + except UnicodeDecodeError: + raise IDNAError('Invalid ASCII in A-label') if uts46: s = uts46_remap(s, std3_rules, False) trailing_dot = False diff --git a/env/Lib/site-packages/pip/_vendor/idna/idnadata.py b/env/Lib/site-packages/pip/_vendor/idna/idnadata.py index b86a3e06e429aa1bdd67713a3d4998410375dd49..67db4625829680298b2a5a9032a379d870a00700 100644 --- a/env/Lib/site-packages/pip/_vendor/idna/idnadata.py +++ b/env/Lib/site-packages/pip/_vendor/idna/idnadata.py @@ -1,6 +1,6 @@ # This file is automatically generated by tools/idna-data -__version__ = '13.0.0' +__version__ = '15.0.0' scripts = { 'Greek': ( 0x37000000374, @@ -49,17 +49,19 @@ scripts = { 0x30210000302a, 0x30380000303c, 0x340000004dc0, - 0x4e0000009ffd, + 0x4e000000a000, 0xf9000000fa6e, 0xfa700000fada, + 0x16fe200016fe4, 0x16ff000016ff2, - 0x200000002a6de, - 0x2a7000002b735, + 0x200000002a6e0, + 0x2a7000002b73a, 0x2b7400002b81e, 0x2b8200002cea2, 0x2ceb00002ebe1, 0x2f8000002fa1e, 0x300000003134b, + 0x31350000323b0, ), 'Hebrew': ( 0x591000005c8, @@ -75,7 +77,8 @@ scripts = { 'Hiragana': ( 0x304100003097, 0x309d000030a0, - 0x1b0010001b11f, + 0x1b0010001b120, + 0x1b1320001b133, 0x1b1500001b153, 0x1f2000001f201, ), @@ -87,7 +90,12 @@ scripts = { 0x330000003358, 0xff660000ff70, 0xff710000ff9e, + 0x1aff00001aff4, + 0x1aff50001affc, + 0x1affd0001afff, 0x1b0000001b001, + 0x1b1200001b123, + 0x1b1550001b156, 0x1b1640001b168, ), } @@ -405,6 +413,39 @@ joining_types = { 0x868: 68, 0x869: 82, 0x86a: 82, + 0x870: 82, + 0x871: 82, + 0x872: 82, + 0x873: 82, + 0x874: 82, + 0x875: 82, + 0x876: 82, + 0x877: 82, + 0x878: 82, + 0x879: 82, + 0x87a: 82, + 0x87b: 82, + 0x87c: 82, + 0x87d: 82, + 0x87e: 82, + 0x87f: 82, + 0x880: 82, + 0x881: 82, + 0x882: 82, + 0x883: 67, + 0x884: 67, + 0x885: 67, + 0x886: 68, + 0x887: 85, + 0x888: 85, + 0x889: 68, + 0x88a: 68, + 0x88b: 68, + 0x88c: 68, + 0x88d: 68, + 0x88e: 82, + 0x890: 85, + 0x891: 85, 0x8a0: 68, 0x8a1: 68, 0x8a2: 68, @@ -426,6 +467,7 @@ joining_types = { 0x8b2: 82, 0x8b3: 68, 0x8b4: 68, + 0x8b5: 68, 0x8b6: 68, 0x8b7: 68, 0x8b8: 68, @@ -444,6 +486,7 @@ joining_types = { 0x8c5: 68, 0x8c6: 68, 0x8c7: 68, + 0x8c8: 68, 0x8e2: 85, 0x1806: 85, 0x1807: 68, @@ -768,6 +811,24 @@ joining_types = { 0x10f52: 68, 0x10f53: 68, 0x10f54: 82, + 0x10f70: 68, + 0x10f71: 68, + 0x10f72: 68, + 0x10f73: 68, + 0x10f74: 82, + 0x10f75: 82, + 0x10f76: 68, + 0x10f77: 68, + 0x10f78: 68, + 0x10f79: 68, + 0x10f7a: 68, + 0x10f7b: 68, + 0x10f7c: 68, + 0x10f7d: 68, + 0x10f7e: 68, + 0x10f7f: 68, + 0x10f80: 68, + 0x10f81: 68, 0x10fb0: 68, 0x10fb1: 85, 0x10fb2: 68, @@ -1168,9 +1229,9 @@ codepoint_classes = { 0x8000000082e, 0x8400000085c, 0x8600000086b, - 0x8a0000008b5, - 0x8b6000008c8, - 0x8d3000008e2, + 0x87000000888, + 0x8890000088f, + 0x898000008e2, 0x8e300000958, 0x96000000964, 0x96600000970, @@ -1252,11 +1313,12 @@ codepoint_classes = { 0xc0e00000c11, 0xc1200000c29, 0xc2a00000c3a, - 0xc3d00000c45, + 0xc3c00000c45, 0xc4600000c49, 0xc4a00000c4e, 0xc5500000c57, 0xc5800000c5b, + 0xc5d00000c5e, 0xc6000000c64, 0xc6600000c70, 0xc8000000c84, @@ -1269,10 +1331,10 @@ codepoint_classes = { 0xcc600000cc9, 0xcca00000cce, 0xcd500000cd7, - 0xcde00000cdf, + 0xcdd00000cdf, 0xce000000ce4, 0xce600000cf0, - 0xcf100000cf3, + 0xcf100000cf4, 0xd0000000d0d, 0xd0e00000d11, 0xd1200000d45, @@ -1307,7 +1369,7 @@ codepoint_classes = { 0xeb400000ebe, 0xec000000ec5, 0xec600000ec7, - 0xec800000ece, + 0xec800000ecf, 0xed000000eda, 0xede00000ee0, 0xf0000000f01, 
@@ -1366,9 +1428,8 @@ codepoint_classes = { 0x16810000169b, 0x16a0000016eb, 0x16f1000016f9, - 0x17000000170d, - 0x170e00001715, - 0x172000001735, + 0x170000001716, + 0x171f00001735, 0x174000001754, 0x17600000176d, 0x176e00001771, @@ -1397,8 +1458,8 @@ codepoint_classes = { 0x1a9000001a9a, 0x1aa700001aa8, 0x1ab000001abe, - 0x1abf00001ac1, - 0x1b0000001b4c, + 0x1abf00001acf, + 0x1b0000001b4d, 0x1b5000001b5a, 0x1b6b00001b74, 0x1b8000001bf4, @@ -1413,8 +1474,7 @@ codepoint_classes = { 0x1d4e00001d4f, 0x1d6b00001d78, 0x1d7900001d9b, - 0x1dc000001dfa, - 0x1dfb00001e00, + 0x1dc000001e00, 0x1e0100001e02, 0x1e0300001e04, 0x1e0500001e06, @@ -1563,7 +1623,7 @@ codepoint_classes = { 0x1ff600001ff7, 0x214e0000214f, 0x218400002185, - 0x2c3000002c5f, + 0x2c3000002c60, 0x2c6100002c62, 0x2c6500002c67, 0x2c6800002c69, @@ -1652,8 +1712,7 @@ codepoint_classes = { 0x31a0000031c0, 0x31f000003200, 0x340000004dc0, - 0x4e0000009ffd, - 0xa0000000a48d, + 0x4e000000a48d, 0xa4d00000a4fe, 0xa5000000a60d, 0xa6100000a62c, @@ -1766,9 +1825,16 @@ codepoint_classes = { 0xa7bb0000a7bc, 0xa7bd0000a7be, 0xa7bf0000a7c0, + 0xa7c10000a7c2, 0xa7c30000a7c4, 0xa7c80000a7c9, 0xa7ca0000a7cb, + 0xa7d10000a7d2, + 0xa7d30000a7d4, + 0xa7d50000a7d6, + 0xa7d70000a7d8, + 0xa7d90000a7da, + 0xa7f20000a7f5, 0xa7f60000a7f8, 0xa7fa0000a828, 0xa82c0000a82d, @@ -1796,7 +1862,7 @@ codepoint_classes = { 0xab200000ab27, 0xab280000ab2f, 0xab300000ab5b, - 0xab600000ab6a, + 0xab600000ab69, 0xabc00000abeb, 0xabec0000abee, 0xabf00000abfa, @@ -1834,9 +1900,16 @@ codepoint_classes = { 0x104d8000104fc, 0x1050000010528, 0x1053000010564, + 0x10597000105a2, + 0x105a3000105b2, + 0x105b3000105ba, + 0x105bb000105bd, 0x1060000010737, 0x1074000010756, 0x1076000010768, + 0x1078000010786, + 0x10787000107b1, + 0x107b2000107bb, 0x1080000010806, 0x1080800010809, 0x1080a00010836, @@ -1873,14 +1946,16 @@ codepoint_classes = { 0x10e8000010eaa, 0x10eab00010ead, 0x10eb000010eb2, - 0x10f0000010f1d, + 0x10efd00010f1d, 0x10f2700010f28, 0x10f3000010f51, + 0x10f7000010f86, 0x10fb000010fc5, 0x10fe000010ff7, 0x1100000011047, - 0x1106600011070, + 0x1106600011076, 0x1107f000110bb, + 0x110c2000110c3, 0x110d0000110e9, 0x110f0000110fa, 0x1110000011135, @@ -1894,7 +1969,7 @@ codepoint_classes = { 0x111dc000111dd, 0x1120000011212, 0x1121300011238, - 0x1123e0001123f, + 0x1123e00011242, 0x1128000011287, 0x1128800011289, 0x1128a0001128e, @@ -1934,6 +2009,7 @@ codepoint_classes = { 0x117000001171b, 0x1171d0001172c, 0x117300001173a, + 0x1174000011747, 0x118000001183b, 0x118c0000118ea, 0x118ff00011907, @@ -1952,7 +2028,7 @@ codepoint_classes = { 0x11a4700011a48, 0x11a5000011a9a, 0x11a9d00011a9e, - 0x11ac000011af9, + 0x11ab000011af9, 0x11c0000011c09, 0x11c0a00011c37, 0x11c3800011c41, @@ -1974,14 +2050,22 @@ codepoint_classes = { 0x11d9300011d99, 0x11da000011daa, 0x11ee000011ef7, + 0x11f0000011f11, + 0x11f1200011f3b, + 0x11f3e00011f43, + 0x11f5000011f5a, 0x11fb000011fb1, 0x120000001239a, 0x1248000012544, - 0x130000001342f, + 0x12f9000012ff1, + 0x1300000013430, + 0x1344000013456, 0x1440000014647, 0x1680000016a39, 0x16a4000016a5f, 0x16a6000016a6a, + 0x16a7000016abf, + 0x16ac000016aca, 0x16ad000016aee, 0x16af000016af5, 0x16b0000016b37, @@ -1999,8 +2083,13 @@ codepoint_classes = { 0x17000000187f8, 0x1880000018cd6, 0x18d0000018d09, - 0x1b0000001b11f, + 0x1aff00001aff4, + 0x1aff50001affc, + 0x1affd0001afff, + 0x1b0000001b123, + 0x1b1320001b133, 0x1b1500001b153, + 0x1b1550001b156, 0x1b1640001b168, 0x1b1700001b2fc, 0x1bc000001bc6b, @@ -2008,33 +2097,45 @@ codepoint_classes = { 0x1bc800001bc89, 
0x1bc900001bc9a, 0x1bc9d0001bc9f, + 0x1cf000001cf2e, + 0x1cf300001cf47, 0x1da000001da37, 0x1da3b0001da6d, 0x1da750001da76, 0x1da840001da85, 0x1da9b0001daa0, 0x1daa10001dab0, + 0x1df000001df1f, + 0x1df250001df2b, 0x1e0000001e007, 0x1e0080001e019, 0x1e01b0001e022, 0x1e0230001e025, 0x1e0260001e02b, + 0x1e0300001e06e, + 0x1e08f0001e090, 0x1e1000001e12d, 0x1e1300001e13e, 0x1e1400001e14a, 0x1e14e0001e14f, + 0x1e2900001e2af, 0x1e2c00001e2fa, + 0x1e4d00001e4fa, + 0x1e7e00001e7e7, + 0x1e7e80001e7ec, + 0x1e7ed0001e7ef, + 0x1e7f00001e7ff, 0x1e8000001e8c5, 0x1e8d00001e8d7, 0x1e9220001e94c, 0x1e9500001e95a, - 0x1fbf00001fbfa, - 0x200000002a6de, - 0x2a7000002b735, + 0x200000002a6e0, + 0x2a7000002b73a, 0x2b7400002b81e, 0x2b8200002cea2, 0x2ceb00002ebe1, 0x300000003134b, + 0x31350000323b0, ), 'CONTEXTJ': ( 0x200c0000200e, diff --git a/env/Lib/site-packages/pip/_vendor/idna/intranges.py b/env/Lib/site-packages/pip/_vendor/idna/intranges.py index fa8a735662d0afc1950ba75bc57e99e8480dac88..6a43b0475347cb50d0d65ada1000a82eeca9e882 100644 --- a/env/Lib/site-packages/pip/_vendor/idna/intranges.py +++ b/env/Lib/site-packages/pip/_vendor/idna/intranges.py @@ -6,8 +6,9 @@ in the original list?" in time O(log(# runs)). """ import bisect +from typing import List, Tuple -def intranges_from_list(list_): +def intranges_from_list(list_: List[int]) -> Tuple[int, ...]: """Represent a list of integers as a sequence of ranges: ((start_0, end_0), (start_1, end_1), ...), such that the original integers are exactly those x such that start_i <= x < end_i for some i. @@ -28,14 +29,14 @@ def intranges_from_list(list_): return tuple(ranges) -def _encode_range(start, end): +def _encode_range(start: int, end: int) -> int: return (start << 32) | end -def _decode_range(r): +def _decode_range(r: int) -> Tuple[int, int]: return (r >> 32), (r & ((1 << 32) - 1)) -def intranges_contain(int_, ranges): +def intranges_contain(int_: int, ranges: Tuple[int, ...]) -> bool: """Determine if `int_` falls into one of the ranges in `ranges`.""" tuple_ = _encode_range(int_, 0) pos = bisect.bisect_left(ranges, tuple_) diff --git a/env/Lib/site-packages/pip/_vendor/idna/package_data.py b/env/Lib/site-packages/pip/_vendor/idna/package_data.py index 1420ea2f1461be009a3fbfd1c0295c0c1deb3153..8501893bd153b7216524084cad23e90aeac0b1f8 100644 --- a/env/Lib/site-packages/pip/_vendor/idna/package_data.py +++ b/env/Lib/site-packages/pip/_vendor/idna/package_data.py @@ -1,2 +1,2 @@ -__version__ = '3.1' +__version__ = '3.4' diff --git a/env/Lib/site-packages/pip/_vendor/idna/uts46data.py b/env/Lib/site-packages/pip/_vendor/idna/uts46data.py index 8ae36cbe7f1196856fc6b5c0987c95c9d077fabd..186796c17b25c1e766112ef4d9f16bb2dea4b306 100644 --- a/env/Lib/site-packages/pip/_vendor/idna/uts46data.py +++ b/env/Lib/site-packages/pip/_vendor/idna/uts46data.py @@ -1,10 +1,14 @@ # This file is automatically generated by tools/idna-data +# vim: set fileencoding=utf-8 : + +from typing import List, Tuple, Union + """IDNA Mapping Table from UTS46.""" -__version__ = '13.0.0' -def _seg_0(): +__version__ = '15.0.0' +def _seg_0() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ (0x0, '3'), (0x1, '3'), @@ -108,7 +112,7 @@ def _seg_0(): (0x63, 'V'), ] -def _seg_1(): +def _seg_1() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ (0x64, 'V'), (0x65, 'V'), @@ -212,7 +216,7 @@ def _seg_1(): (0xC7, 'M', 'ç'), ] -def _seg_2(): +def _seg_2() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ (0xC8, 'M', 'è'), (0xC9, 'M', 'é'), @@ -316,7 +320,7 @@ def 
_seg_2(): (0x12B, 'V'), ] -def _seg_3(): +def _seg_3() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ (0x12C, 'M', 'ĭ'), (0x12D, 'V'), @@ -420,7 +424,7 @@ def _seg_3(): (0x193, 'M', 'ɠ'), ] -def _seg_4(): +def _seg_4() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ (0x194, 'M', 'ɣ'), (0x195, 'V'), @@ -524,7 +528,7 @@ def _seg_4(): (0x20C, 'M', 'ȍ'), ] -def _seg_5(): +def _seg_5() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ (0x20D, 'V'), (0x20E, 'M', 'ȏ'), @@ -628,7 +632,7 @@ def _seg_5(): (0x377, 'V'), ] -def _seg_6(): +def _seg_6() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ (0x378, 'X'), (0x37A, '3', ' ι'), @@ -732,7 +736,7 @@ def _seg_6(): (0x402, 'M', 'ђ'), ] -def _seg_7(): +def _seg_7() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ (0x403, 'M', 'ѓ'), (0x404, 'M', 'є'), @@ -836,7 +840,7 @@ def _seg_7(): (0x49D, 'V'), ] -def _seg_8(): +def _seg_8() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ (0x49E, 'M', 'ҟ'), (0x49F, 'V'), @@ -940,7 +944,7 @@ def _seg_8(): (0x502, 'M', 'ԃ'), ] -def _seg_9(): +def _seg_9() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ (0x503, 'V'), (0x504, 'M', 'ԅ'), @@ -1041,10 +1045,10 @@ def _seg_9(): (0x5F5, 'X'), (0x606, 'V'), (0x61C, 'X'), - (0x61E, 'V'), + (0x61D, 'V'), ] -def _seg_10(): +def _seg_10() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ (0x675, 'M', 'اٴ'), (0x676, 'M', 'وٴ'), @@ -1070,11 +1074,9 @@ def _seg_10(): (0x85F, 'X'), (0x860, 'V'), (0x86B, 'X'), - (0x8A0, 'V'), - (0x8B5, 'X'), - (0x8B6, 'V'), - (0x8C8, 'X'), - (0x8D3, 'V'), + (0x870, 'V'), + (0x88F, 'X'), + (0x898, 'V'), (0x8E2, 'X'), (0x8E3, 'V'), (0x958, 'M', 'क़'), @@ -1146,12 +1148,12 @@ def _seg_10(): (0xA59, 'M', 'ਖ਼'), (0xA5A, 'M', 'ਗ਼'), (0xA5B, 'M', 'ਜ਼'), + (0xA5C, 'V'), + (0xA5D, 'X'), ] -def _seg_11(): +def _seg_11() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ - (0xA5C, 'V'), - (0xA5D, 'X'), (0xA5E, 'M', 'ਫ਼'), (0xA5F, 'X'), (0xA66, 'V'), @@ -1250,14 +1252,14 @@ def _seg_11(): (0xC0E, 'V'), (0xC11, 'X'), (0xC12, 'V'), + (0xC29, 'X'), + (0xC2A, 'V'), ] -def _seg_12(): +def _seg_12() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ - (0xC29, 'X'), - (0xC2A, 'V'), (0xC3A, 'X'), - (0xC3D, 'V'), + (0xC3C, 'V'), (0xC45, 'X'), (0xC46, 'V'), (0xC49, 'X'), @@ -1267,6 +1269,8 @@ def _seg_12(): (0xC57, 'X'), (0xC58, 'V'), (0xC5B, 'X'), + (0xC5D, 'V'), + (0xC5E, 'X'), (0xC60, 'V'), (0xC64, 'X'), (0xC66, 'V'), @@ -1289,14 +1293,14 @@ def _seg_12(): (0xCCE, 'X'), (0xCD5, 'V'), (0xCD7, 'X'), - (0xCDE, 'V'), + (0xCDD, 'V'), (0xCDF, 'X'), (0xCE0, 'V'), (0xCE4, 'X'), (0xCE6, 'V'), (0xCF0, 'X'), (0xCF1, 'V'), - (0xCF3, 'X'), + (0xCF4, 'X'), (0xD00, 'V'), (0xD0D, 'X'), (0xD0E, 'V'), @@ -1356,7 +1360,7 @@ def _seg_12(): (0xEB4, 'V'), ] -def _seg_13(): +def _seg_13() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ (0xEBE, 'X'), (0xEC0, 'V'), @@ -1364,7 +1368,7 @@ def _seg_13(): (0xEC6, 'V'), (0xEC7, 'X'), (0xEC8, 'V'), - (0xECE, 'X'), + (0xECF, 'X'), (0xED0, 'V'), (0xEDA, 'X'), (0xEDC, 'M', 'ຫນ'), @@ -1460,7 +1464,7 @@ def _seg_13(): (0x1312, 'V'), ] -def _seg_14(): +def _seg_14() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ (0x1316, 'X'), (0x1318, 'V'), @@ -1485,10 +1489,8 @@ def _seg_14(): (0x16A0, 'V'), (0x16F9, 'X'), (0x1700, 'V'), - (0x170D, 'X'), - (0x170E, 'V'), - (0x1715, 'X'), - (0x1720, 'V'), + (0x1716, 'X'), + (0x171F, 'V'), (0x1737, 'X'), (0x1740, 'V'), (0x1754, 'X'), @@ 
-1511,6 +1513,7 @@ def _seg_14(): (0x1807, 'V'), (0x180B, 'I'), (0x180E, 'X'), + (0x180F, 'I'), (0x1810, 'V'), (0x181A, 'X'), (0x1820, 'V'), @@ -1550,11 +1553,11 @@ def _seg_14(): (0x1AA0, 'V'), (0x1AAE, 'X'), (0x1AB0, 'V'), - (0x1AC1, 'X'), + (0x1ACF, 'X'), (0x1B00, 'V'), - (0x1B4C, 'X'), + (0x1B4D, 'X'), (0x1B50, 'V'), - (0x1B7D, 'X'), + (0x1B7F, 'X'), (0x1B80, 'V'), (0x1BF4, 'X'), (0x1BFC, 'V'), @@ -1562,11 +1565,11 @@ def _seg_14(): (0x1C3B, 'V'), (0x1C4A, 'X'), (0x1C4D, 'V'), + (0x1C80, 'M', 'в'), ] -def _seg_15(): +def _seg_15() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ - (0x1C80, 'M', 'в'), (0x1C81, 'M', 'д'), (0x1C82, 'M', 'о'), (0x1C83, 'M', 'с'), @@ -1666,11 +1669,11 @@ def _seg_15(): (0x1D50, 'M', 'm'), (0x1D51, 'M', 'ŋ'), (0x1D52, 'M', 'o'), + (0x1D53, 'M', 'ɔ'), ] -def _seg_16(): +def _seg_16() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: return [ - (0x1D53, 'M', 'ɔ'), (0x1D54, 'M', 'ᴖ'), (0x1D55, 'M', 'ᴗ'), (0x1D56, 'M', 'p'), @@ -1735,8 +1738,6 @@ def _seg_16(): (0x1DBE, 'M', 'ʒ'), (0x1DBF, 'M', 'θ'), (0x1DC0, 'V'), - (0x1DFA, 'X'), - (0x1DFB, 'V'), (0x1E00, 'M', 'ḁ'), (0x1E01, 'V'), (0x1E02, 'M', 'ḃ'), @@ -1770,13 +1771,13 @@ def _seg_16(): (0x1E1E, 'M', 'ḟ'), (0x1E1F, 'V'), (0x1E20, 'M', 'ḡ'), - ] - -def _seg_17(): - return [ (0x1E21, 'V'), (0x1E22, 'M', 'ḣ'), (0x1E23, 'V'), + ] + +def _seg_17() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1E24, 'M', 'ḥ'), (0x1E25, 'V'), (0x1E26, 'M', 'ḧ'), @@ -1874,13 +1875,13 @@ def _seg_17(): (0x1E82, 'M', 'ẃ'), (0x1E83, 'V'), (0x1E84, 'M', 'ẅ'), - ] - -def _seg_18(): - return [ (0x1E85, 'V'), (0x1E86, 'M', 'ẇ'), (0x1E87, 'V'), + ] + +def _seg_18() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1E88, 'M', 'ẉ'), (0x1E89, 'V'), (0x1E8A, 'M', 'ẋ'), @@ -1978,13 +1979,13 @@ def _seg_18(): (0x1EEB, 'V'), (0x1EEC, 'M', 'ử'), (0x1EED, 'V'), - ] - -def _seg_19(): - return [ (0x1EEE, 'M', 'ữ'), (0x1EEF, 'V'), (0x1EF0, 'M', 'ự'), + ] + +def _seg_19() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1EF1, 'V'), (0x1EF2, 'M', 'ỳ'), (0x1EF3, 'V'), @@ -2082,13 +2083,13 @@ def _seg_19(): (0x1F82, 'M', 'ἂι'), (0x1F83, 'M', 'ἃι'), (0x1F84, 'M', 'ἄι'), - ] - -def _seg_20(): - return [ (0x1F85, 'M', 'ἅι'), (0x1F86, 'M', 'ἆι'), (0x1F87, 'M', 'ἇι'), + ] + +def _seg_20() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1F88, 'M', 'ἀι'), (0x1F89, 'M', 'ἁι'), (0x1F8A, 'M', 'ἂι'), @@ -2186,13 +2187,13 @@ def _seg_20(): (0x1FF0, 'X'), (0x1FF2, 'M', 'ὼι'), (0x1FF3, 'M', 'ωι'), - ] - -def _seg_21(): - return [ (0x1FF4, 'M', 'ώι'), (0x1FF5, 'X'), (0x1FF6, 'V'), + ] + +def _seg_21() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1FF7, 'M', 'ῶι'), (0x1FF8, 'M', 'ὸ'), (0x1FF9, 'M', 'ό'), @@ -2285,18 +2286,18 @@ def _seg_21(): (0x20A0, 'V'), (0x20A8, 'M', 'rs'), (0x20A9, 'V'), - (0x20C0, 'X'), + (0x20C1, 'X'), (0x20D0, 'V'), (0x20F1, 'X'), (0x2100, '3', 'a/c'), (0x2101, '3', 'a/s'), - ] - -def _seg_22(): - return [ (0x2102, 'M', 'c'), (0x2103, 'M', '°c'), (0x2104, 'V'), + ] + +def _seg_22() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x2105, '3', 'c/o'), (0x2106, '3', 'c/u'), (0x2107, 'M', 'ɛ'), @@ -2394,13 +2395,13 @@ def _seg_22(): (0x2177, 'M', 'viii'), (0x2178, 'M', 'ix'), (0x2179, 'M', 'x'), - ] - -def _seg_23(): - return [ (0x217A, 'M', 'xi'), (0x217B, 'M', 'xii'), (0x217C, 'M', 'l'), + ] + +def _seg_23() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x217D, 'M', 'c'), (0x217E, 'M', 
'd'), (0x217F, 'M', 'm'), @@ -2498,13 +2499,13 @@ def _seg_23(): (0x24B7, 'M', 'b'), (0x24B8, 'M', 'c'), (0x24B9, 'M', 'd'), - ] - -def _seg_24(): - return [ (0x24BA, 'M', 'e'), (0x24BB, 'M', 'f'), (0x24BC, 'M', 'g'), + ] + +def _seg_24() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x24BD, 'M', 'h'), (0x24BE, 'M', 'i'), (0x24BF, 'M', 'j'), @@ -2602,22 +2603,21 @@ def _seg_24(): (0x2C23, 'M', 'ⱓ'), (0x2C24, 'M', 'ⱔ'), (0x2C25, 'M', 'ⱕ'), - ] - -def _seg_25(): - return [ (0x2C26, 'M', 'ⱖ'), (0x2C27, 'M', 'ⱗ'), (0x2C28, 'M', 'ⱘ'), + ] + +def _seg_25() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x2C29, 'M', 'ⱙ'), (0x2C2A, 'M', 'ⱚ'), (0x2C2B, 'M', 'ⱛ'), (0x2C2C, 'M', 'ⱜ'), (0x2C2D, 'M', 'ⱝ'), (0x2C2E, 'M', 'ⱞ'), - (0x2C2F, 'X'), + (0x2C2F, 'M', 'ⱟ'), (0x2C30, 'V'), - (0x2C5F, 'X'), (0x2C60, 'M', 'ⱡ'), (0x2C61, 'V'), (0x2C62, 'M', 'ɫ'), @@ -2706,14 +2706,14 @@ def _seg_25(): (0x2CBC, 'M', 'ⲽ'), (0x2CBD, 'V'), (0x2CBE, 'M', 'ⲿ'), - ] - -def _seg_26(): - return [ (0x2CBF, 'V'), (0x2CC0, 'M', 'ⳁ'), (0x2CC1, 'V'), (0x2CC2, 'M', 'ⳃ'), + ] + +def _seg_26() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x2CC3, 'V'), (0x2CC4, 'M', 'ⳅ'), (0x2CC5, 'V'), @@ -2784,7 +2784,7 @@ def _seg_26(): (0x2DD8, 'V'), (0x2DDF, 'X'), (0x2DE0, 'V'), - (0x2E53, 'X'), + (0x2E5E, 'X'), (0x2E80, 'V'), (0x2E9A, 'X'), (0x2E9B, 'V'), @@ -2810,14 +2810,14 @@ def _seg_26(): (0x2F0F, 'M', '几'), (0x2F10, 'M', '凵'), (0x2F11, 'M', '刀'), - ] - -def _seg_27(): - return [ (0x2F12, 'M', '力'), (0x2F13, 'M', '勹'), (0x2F14, 'M', '匕'), (0x2F15, 'M', '匚'), + ] + +def _seg_27() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x2F16, 'M', '匸'), (0x2F17, 'M', '十'), (0x2F18, 'M', '卜'), @@ -2914,14 +2914,14 @@ def _seg_27(): (0x2F73, 'M', '穴'), (0x2F74, 'M', '立'), (0x2F75, 'M', '竹'), - ] - -def _seg_28(): - return [ (0x2F76, 'M', '米'), (0x2F77, 'M', '糸'), (0x2F78, 'M', '缶'), (0x2F79, 'M', '网'), + ] + +def _seg_28() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x2F7A, 'M', '羊'), (0x2F7B, 'M', '羽'), (0x2F7C, 'M', '老'), @@ -3018,14 +3018,14 @@ def _seg_28(): (0x3000, '3', ' '), (0x3001, 'V'), (0x3002, 'M', '.'), - ] - -def _seg_29(): - return [ (0x3003, 'V'), (0x3036, 'M', '〒'), (0x3037, 'V'), (0x3038, 'M', '十'), + ] + +def _seg_29() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x3039, 'M', '卄'), (0x303A, 'M', '卅'), (0x303B, 'V'), @@ -3122,14 +3122,14 @@ def _seg_29(): (0x317E, 'M', 'ᄶ'), (0x317F, 'M', 'ᅀ'), (0x3180, 'M', 'ᅇ'), - ] - -def _seg_30(): - return [ (0x3181, 'M', 'ᅌ'), (0x3182, 'M', 'ᇱ'), (0x3183, 'M', 'ᇲ'), (0x3184, 'M', 'ᅗ'), + ] + +def _seg_30() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x3185, 'M', 'ᅘ'), (0x3186, 'M', 'ᅙ'), (0x3187, 'M', 'ᆄ'), @@ -3226,14 +3226,14 @@ def _seg_30(): (0x3240, '3', '(祭)'), (0x3241, '3', '(休)'), (0x3242, '3', '(自)'), - ] - -def _seg_31(): - return [ (0x3243, '3', '(至)'), (0x3244, 'M', '問'), (0x3245, 'M', '幼'), (0x3246, 'M', '文'), + ] + +def _seg_31() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x3247, 'M', '箏'), (0x3248, 'V'), (0x3250, 'M', 'pte'), @@ -3330,14 +3330,14 @@ def _seg_31(): (0x32AB, 'M', '学'), (0x32AC, 'M', '監'), (0x32AD, 'M', '企'), - ] - -def _seg_32(): - return [ (0x32AE, 'M', '資'), (0x32AF, 'M', '協'), (0x32B0, 'M', '夜'), (0x32B1, 'M', '36'), + ] + +def _seg_32() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x32B2, 'M', '37'), (0x32B3, 'M', '38'), (0x32B4, 'M', '39'), @@ -3434,14 +3434,14 @@ 
def _seg_32(): (0x330F, 'M', 'ガンマ'), (0x3310, 'M', 'ギガ'), (0x3311, 'M', 'ギニー'), - ] - -def _seg_33(): - return [ (0x3312, 'M', 'キュリー'), (0x3313, 'M', 'ギルダー'), (0x3314, 'M', 'キロ'), (0x3315, 'M', 'キログラム'), + ] + +def _seg_33() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x3316, 'M', 'キロメートル'), (0x3317, 'M', 'キロワット'), (0x3318, 'M', 'グラム'), @@ -3538,14 +3538,14 @@ def _seg_33(): (0x3373, 'M', 'au'), (0x3374, 'M', 'bar'), (0x3375, 'M', 'ov'), - ] - -def _seg_34(): - return [ (0x3376, 'M', 'pc'), (0x3377, 'M', 'dm'), (0x3378, 'M', 'dm2'), (0x3379, 'M', 'dm3'), + ] + +def _seg_34() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x337A, 'M', 'iu'), (0x337B, 'M', '平成'), (0x337C, 'M', '昭和'), @@ -3642,14 +3642,14 @@ def _seg_34(): (0x33D7, 'M', 'ph'), (0x33D8, 'X'), (0x33D9, 'M', 'ppm'), - ] - -def _seg_35(): - return [ (0x33DA, 'M', 'pr'), (0x33DB, 'M', 'sr'), (0x33DC, 'M', 'sv'), (0x33DD, 'M', 'wb'), + ] + +def _seg_35() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x33DE, 'M', 'v∕m'), (0x33DF, 'M', 'a∕m'), (0x33E0, 'M', '1日'), @@ -3685,8 +3685,6 @@ def _seg_35(): (0x33FE, 'M', '31日'), (0x33FF, 'M', 'gal'), (0x3400, 'V'), - (0x9FFD, 'X'), - (0xA000, 'V'), (0xA48D, 'X'), (0xA490, 'V'), (0xA4C7, 'X'), @@ -3746,16 +3744,16 @@ def _seg_35(): (0xA685, 'V'), (0xA686, 'M', 'ꚇ'), (0xA687, 'V'), - ] - -def _seg_36(): - return [ (0xA688, 'M', 'ꚉ'), (0xA689, 'V'), (0xA68A, 'M', 'ꚋ'), (0xA68B, 'V'), (0xA68C, 'M', 'ꚍ'), (0xA68D, 'V'), + ] + +def _seg_36() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xA68E, 'M', 'ꚏ'), (0xA68F, 'V'), (0xA690, 'M', 'ꚑ'), @@ -3850,16 +3848,16 @@ def _seg_36(): (0xA76C, 'M', 'ꝭ'), (0xA76D, 'V'), (0xA76E, 'M', 'ꝯ'), - ] - -def _seg_37(): - return [ (0xA76F, 'V'), (0xA770, 'M', 'ꝯ'), (0xA771, 'V'), (0xA779, 'M', 'ꝺ'), (0xA77A, 'V'), (0xA77B, 'M', 'ꝼ'), + ] + +def _seg_37() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xA77C, 'V'), (0xA77D, 'M', 'ᵹ'), (0xA77E, 'M', 'ꝿ'), @@ -3922,7 +3920,8 @@ def _seg_37(): (0xA7BD, 'V'), (0xA7BE, 'M', 'ꞿ'), (0xA7BF, 'V'), - (0xA7C0, 'X'), + (0xA7C0, 'M', 'ꟁ'), + (0xA7C1, 'V'), (0xA7C2, 'M', 'ꟃ'), (0xA7C3, 'V'), (0xA7C4, 'M', 'ꞔ'), @@ -3933,6 +3932,20 @@ def _seg_37(): (0xA7C9, 'M', 'ꟊ'), (0xA7CA, 'V'), (0xA7CB, 'X'), + (0xA7D0, 'M', 'ꟑ'), + (0xA7D1, 'V'), + (0xA7D2, 'X'), + (0xA7D3, 'V'), + (0xA7D4, 'X'), + (0xA7D5, 'V'), + (0xA7D6, 'M', 'ꟗ'), + (0xA7D7, 'V'), + (0xA7D8, 'M', 'ꟙ'), + (0xA7D9, 'V'), + (0xA7DA, 'X'), + (0xA7F2, 'M', 'c'), + (0xA7F3, 'M', 'f'), + (0xA7F4, 'M', 'q'), (0xA7F5, 'M', 'ꟶ'), (0xA7F6, 'V'), (0xA7F8, 'M', 'ħ'), @@ -3945,6 +3958,10 @@ def _seg_37(): (0xA878, 'X'), (0xA880, 'V'), (0xA8C6, 'X'), + ] + +def _seg_38() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xA8CE, 'V'), (0xA8DA, 'X'), (0xA8E0, 'V'), @@ -3954,10 +3971,6 @@ def _seg_37(): (0xA980, 'V'), (0xA9CE, 'X'), (0xA9CF, 'V'), - ] - -def _seg_38(): - return [ (0xA9DA, 'X'), (0xA9DE, 'V'), (0xA9FF, 'X'), @@ -4049,6 +4062,10 @@ def _seg_38(): (0xABA8, 'M', 'Ꮨ'), (0xABA9, 'M', 'Ꮩ'), (0xABAA, 'M', 'Ꮪ'), + ] + +def _seg_39() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xABAB, 'M', 'Ꮫ'), (0xABAC, 'M', 'Ꮬ'), (0xABAD, 'M', 'Ꮭ'), @@ -4058,10 +4075,6 @@ def _seg_38(): (0xABB1, 'M', 'Ꮱ'), (0xABB2, 'M', 'Ꮲ'), (0xABB3, 'M', 'Ꮳ'), - ] - -def _seg_39(): - return [ (0xABB4, 'M', 'Ꮴ'), (0xABB5, 'M', 'Ꮵ'), (0xABB6, 'M', 'Ꮶ'), @@ -4153,6 +4166,10 @@ def _seg_39(): (0xF943, 'M', '弄'), (0xF944, 'M', '籠'), (0xF945, 'M', '聾'), + ] + +def 
_seg_40() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xF946, 'M', '牢'), (0xF947, 'M', '磊'), (0xF948, 'M', '賂'), @@ -4162,10 +4179,6 @@ def _seg_39(): (0xF94C, 'M', '樓'), (0xF94D, 'M', '淚'), (0xF94E, 'M', '漏'), - ] - -def _seg_40(): - return [ (0xF94F, 'M', '累'), (0xF950, 'M', '縷'), (0xF951, 'M', '陋'), @@ -4257,6 +4270,10 @@ def _seg_40(): (0xF9A7, 'M', '獵'), (0xF9A8, 'M', '令'), (0xF9A9, 'M', '囹'), + ] + +def _seg_41() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xF9AA, 'M', '寧'), (0xF9AB, 'M', '嶺'), (0xF9AC, 'M', '怜'), @@ -4266,10 +4283,6 @@ def _seg_40(): (0xF9B0, 'M', '聆'), (0xF9B1, 'M', '鈴'), (0xF9B2, 'M', '零'), - ] - -def _seg_41(): - return [ (0xF9B3, 'M', '靈'), (0xF9B4, 'M', '領'), (0xF9B5, 'M', '例'), @@ -4361,6 +4374,10 @@ def _seg_41(): (0xFA0B, 'M', '廓'), (0xFA0C, 'M', '兀'), (0xFA0D, 'M', '嗀'), + ] + +def _seg_42() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xFA0E, 'V'), (0xFA10, 'M', '塚'), (0xFA11, 'V'), @@ -4370,10 +4387,6 @@ def _seg_41(): (0xFA16, 'M', '猪'), (0xFA17, 'M', '益'), (0xFA18, 'M', '礼'), - ] - -def _seg_42(): - return [ (0xFA19, 'M', '神'), (0xFA1A, 'M', '祥'), (0xFA1B, 'M', '福'), @@ -4465,6 +4478,10 @@ def _seg_42(): (0xFA76, 'M', '勇'), (0xFA77, 'M', '勺'), (0xFA78, 'M', '喝'), + ] + +def _seg_43() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xFA79, 'M', '啕'), (0xFA7A, 'M', '喙'), (0xFA7B, 'M', '嗢'), @@ -4474,10 +4491,6 @@ def _seg_42(): (0xFA7F, 'M', '奔'), (0xFA80, 'M', '婢'), (0xFA81, 'M', '嬨'), - ] - -def _seg_43(): - return [ (0xFA82, 'M', '廒'), (0xFA83, 'M', '廙'), (0xFA84, 'M', '彩'), @@ -4569,6 +4582,10 @@ def _seg_43(): (0xFADA, 'X'), (0xFB00, 'M', 'ff'), (0xFB01, 'M', 'fi'), + ] + +def _seg_44() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xFB02, 'M', 'fl'), (0xFB03, 'M', 'ffi'), (0xFB04, 'M', 'ffl'), @@ -4578,10 +4595,6 @@ def _seg_43(): (0xFB14, 'M', 'մե'), (0xFB15, 'M', 'մի'), (0xFB16, 'M', 'վն'), - ] - -def _seg_44(): - return [ (0xFB17, 'M', 'մխ'), (0xFB18, 'X'), (0xFB1D, 'M', 'יִ'), @@ -4666,13 +4679,17 @@ def _seg_44(): (0xFBAE, 'M', 'ے'), (0xFBB0, 'M', 'ۓ'), (0xFBB2, 'V'), - (0xFBC2, 'X'), + (0xFBC3, 'X'), (0xFBD3, 'M', 'ڭ'), (0xFBD7, 'M', 'ۇ'), (0xFBD9, 'M', 'ۆ'), (0xFBDB, 'M', 'ۈ'), (0xFBDD, 'M', 'ۇٴ'), (0xFBDE, 'M', 'ۋ'), + ] + +def _seg_45() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xFBE0, 'M', 'ۅ'), (0xFBE2, 'M', 'ۉ'), (0xFBE4, 'M', 'ې'), @@ -4682,10 +4699,6 @@ def _seg_44(): (0xFBEE, 'M', 'ئو'), (0xFBF0, 'M', 'ئۇ'), (0xFBF2, 'M', 'ئۆ'), - ] - -def _seg_45(): - return [ (0xFBF4, 'M', 'ئۈ'), (0xFBF6, 'M', 'ئې'), (0xFBF9, 'M', 'ئى'), @@ -4777,6 +4790,10 @@ def _seg_45(): (0xFC54, 'M', 'هي'), (0xFC55, 'M', 'يج'), (0xFC56, 'M', 'يح'), + ] + +def _seg_46() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xFC57, 'M', 'يخ'), (0xFC58, 'M', 'يم'), (0xFC59, 'M', 'يى'), @@ -4786,10 +4803,6 @@ def _seg_45(): (0xFC5D, 'M', 'ىٰ'), (0xFC5E, '3', ' ٌّ'), (0xFC5F, '3', ' ٍّ'), - ] - -def _seg_46(): - return [ (0xFC60, '3', ' َّ'), (0xFC61, '3', ' ُّ'), (0xFC62, '3', ' ِّ'), @@ -4881,6 +4894,10 @@ def _seg_46(): (0xFCB8, 'M', 'طح'), (0xFCB9, 'M', 'ظم'), (0xFCBA, 'M', 'عج'), + ] + +def _seg_47() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xFCBB, 'M', 'عم'), (0xFCBC, 'M', 'غج'), (0xFCBD, 'M', 'غم'), @@ -4890,10 +4907,6 @@ def _seg_46(): (0xFCC1, 'M', 'فم'), (0xFCC2, 'M', 'قح'), (0xFCC3, 'M', 'قم'), - ] - -def _seg_47(): - return [ (0xFCC4, 'M', 'كج'), (0xFCC5, 'M', 'كح'), (0xFCC6, 'M', 
'كخ'), @@ -4985,6 +4998,10 @@ def _seg_47(): (0xFD1C, 'M', 'حي'), (0xFD1D, 'M', 'جى'), (0xFD1E, 'M', 'جي'), + ] + +def _seg_48() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xFD1F, 'M', 'خى'), (0xFD20, 'M', 'خي'), (0xFD21, 'M', 'صى'), @@ -4994,10 +5011,6 @@ def _seg_47(): (0xFD25, 'M', 'شج'), (0xFD26, 'M', 'شح'), (0xFD27, 'M', 'شخ'), - ] - -def _seg_48(): - return [ (0xFD28, 'M', 'شم'), (0xFD29, 'M', 'شر'), (0xFD2A, 'M', 'سر'), @@ -5020,7 +5033,6 @@ def _seg_48(): (0xFD3B, 'M', 'ظم'), (0xFD3C, 'M', 'اً'), (0xFD3E, 'V'), - (0xFD40, 'X'), (0xFD50, 'M', 'تجم'), (0xFD51, 'M', 'تحج'), (0xFD53, 'M', 'تحم'), @@ -5090,6 +5102,10 @@ def _seg_48(): (0xFDA4, 'M', 'تمى'), (0xFDA5, 'M', 'جمي'), (0xFDA6, 'M', 'جحى'), + ] + +def _seg_49() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xFDA7, 'M', 'جمى'), (0xFDA8, 'M', 'سخى'), (0xFDA9, 'M', 'صحي'), @@ -5098,10 +5114,6 @@ def _seg_48(): (0xFDAC, 'M', 'لجي'), (0xFDAD, 'M', 'لمي'), (0xFDAE, 'M', 'يحي'), - ] - -def _seg_49(): - return [ (0xFDAF, 'M', 'يجي'), (0xFDB0, 'M', 'يمي'), (0xFDB1, 'M', 'ممي'), @@ -5128,6 +5140,8 @@ def _seg_49(): (0xFDC6, 'M', 'سخي'), (0xFDC7, 'M', 'نجي'), (0xFDC8, 'X'), + (0xFDCF, 'V'), + (0xFDD0, 'X'), (0xFDF0, 'M', 'صلے'), (0xFDF1, 'M', 'قلے'), (0xFDF2, 'M', 'الله'), @@ -5142,7 +5156,6 @@ def _seg_49(): (0xFDFB, '3', 'جل جلاله'), (0xFDFC, 'M', 'ریال'), (0xFDFD, 'V'), - (0xFDFE, 'X'), (0xFE00, 'I'), (0xFE10, '3', ','), (0xFE11, 'M', '、'), @@ -5193,6 +5206,10 @@ def _seg_49(): (0xFE5B, '3', '{'), (0xFE5C, '3', '}'), (0xFE5D, 'M', '〔'), + ] + +def _seg_50() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xFE5E, 'M', '〕'), (0xFE5F, '3', '#'), (0xFE60, '3', '&'), @@ -5202,10 +5219,6 @@ def _seg_49(): (0xFE64, '3', '<'), (0xFE65, '3', '>'), (0xFE66, '3', '='), - ] - -def _seg_50(): - return [ (0xFE67, 'X'), (0xFE68, '3', '\\'), (0xFE69, '3', '$'), @@ -5297,6 +5310,10 @@ def _seg_50(): (0xFF18, 'M', '8'), (0xFF19, 'M', '9'), (0xFF1A, '3', ':'), + ] + +def _seg_51() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xFF1B, '3', ';'), (0xFF1C, '3', '<'), (0xFF1D, '3', '='), @@ -5306,10 +5323,6 @@ def _seg_50(): (0xFF21, 'M', 'a'), (0xFF22, 'M', 'b'), (0xFF23, 'M', 'c'), - ] - -def _seg_51(): - return [ (0xFF24, 'M', 'd'), (0xFF25, 'M', 'e'), (0xFF26, 'M', 'f'), @@ -5401,6 +5414,10 @@ def _seg_51(): (0xFF7C, 'M', 'シ'), (0xFF7D, 'M', 'ス'), (0xFF7E, 'M', 'セ'), + ] + +def _seg_52() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xFF7F, 'M', 'ソ'), (0xFF80, 'M', 'タ'), (0xFF81, 'M', 'チ'), @@ -5410,10 +5427,6 @@ def _seg_51(): (0xFF85, 'M', 'ナ'), (0xFF86, 'M', 'ニ'), (0xFF87, 'M', 'ヌ'), - ] - -def _seg_52(): - return [ (0xFF88, 'M', 'ネ'), (0xFF89, 'M', 'ノ'), (0xFF8A, 'M', 'ハ'), @@ -5505,6 +5518,10 @@ def _seg_52(): (0xFFE7, 'X'), (0xFFE8, 'M', '│'), (0xFFE9, 'M', '←'), + ] + +def _seg_53() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0xFFEA, 'M', '↑'), (0xFFEB, 'M', '→'), (0xFFEC, 'M', '↓'), @@ -5514,10 +5531,6 @@ def _seg_52(): (0x10000, 'V'), (0x1000C, 'X'), (0x1000D, 'V'), - ] - -def _seg_53(): - return [ (0x10027, 'X'), (0x10028, 'V'), (0x1003B, 'X'), @@ -5609,6 +5622,10 @@ def _seg_53(): (0x104B3, 'M', '𐓛'), (0x104B4, 'M', '𐓜'), (0x104B5, 'M', '𐓝'), + ] + +def _seg_54() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x104B6, 'M', '𐓞'), (0x104B7, 'M', '𐓟'), (0x104B8, 'M', '𐓠'), @@ -5618,10 +5635,6 @@ def _seg_53(): (0x104BC, 'M', '𐓤'), (0x104BD, 'M', '𐓥'), (0x104BE, 'M', '𐓦'), - ] - -def _seg_54(): - 
return [ (0x104BF, 'M', '𐓧'), (0x104C0, 'M', '𐓨'), (0x104C1, 'M', '𐓩'), @@ -5651,13 +5664,123 @@ def _seg_54(): (0x10530, 'V'), (0x10564, 'X'), (0x1056F, 'V'), - (0x10570, 'X'), + (0x10570, 'M', '𐖗'), + (0x10571, 'M', '𐖘'), + (0x10572, 'M', '𐖙'), + (0x10573, 'M', '𐖚'), + (0x10574, 'M', '𐖛'), + (0x10575, 'M', '𐖜'), + (0x10576, 'M', '𐖝'), + (0x10577, 'M', '𐖞'), + (0x10578, 'M', '𐖟'), + (0x10579, 'M', '𐖠'), + (0x1057A, 'M', '𐖡'), + (0x1057B, 'X'), + (0x1057C, 'M', '𐖣'), + (0x1057D, 'M', '𐖤'), + (0x1057E, 'M', '𐖥'), + (0x1057F, 'M', '𐖦'), + (0x10580, 'M', '𐖧'), + (0x10581, 'M', '𐖨'), + (0x10582, 'M', '𐖩'), + (0x10583, 'M', '𐖪'), + (0x10584, 'M', '𐖫'), + (0x10585, 'M', '𐖬'), + (0x10586, 'M', '𐖭'), + (0x10587, 'M', '𐖮'), + (0x10588, 'M', '𐖯'), + (0x10589, 'M', '𐖰'), + (0x1058A, 'M', '𐖱'), + (0x1058B, 'X'), + (0x1058C, 'M', '𐖳'), + (0x1058D, 'M', '𐖴'), + (0x1058E, 'M', '𐖵'), + (0x1058F, 'M', '𐖶'), + (0x10590, 'M', '𐖷'), + (0x10591, 'M', '𐖸'), + (0x10592, 'M', '𐖹'), + (0x10593, 'X'), + (0x10594, 'M', '𐖻'), + (0x10595, 'M', '𐖼'), + (0x10596, 'X'), + (0x10597, 'V'), + (0x105A2, 'X'), + (0x105A3, 'V'), + (0x105B2, 'X'), + (0x105B3, 'V'), + (0x105BA, 'X'), + (0x105BB, 'V'), + (0x105BD, 'X'), (0x10600, 'V'), (0x10737, 'X'), (0x10740, 'V'), (0x10756, 'X'), (0x10760, 'V'), (0x10768, 'X'), + (0x10780, 'V'), + (0x10781, 'M', 'ː'), + (0x10782, 'M', 'ˑ'), + (0x10783, 'M', 'æ'), + (0x10784, 'M', 'ʙ'), + (0x10785, 'M', 'ɓ'), + (0x10786, 'X'), + (0x10787, 'M', 'ʣ'), + (0x10788, 'M', 'ꭦ'), + ] + +def _seg_55() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x10789, 'M', 'ʥ'), + (0x1078A, 'M', 'ʤ'), + (0x1078B, 'M', 'ɖ'), + (0x1078C, 'M', 'ɗ'), + (0x1078D, 'M', 'ᶑ'), + (0x1078E, 'M', 'ɘ'), + (0x1078F, 'M', 'ɞ'), + (0x10790, 'M', 'ʩ'), + (0x10791, 'M', 'ɤ'), + (0x10792, 'M', 'ɢ'), + (0x10793, 'M', 'ɠ'), + (0x10794, 'M', 'ʛ'), + (0x10795, 'M', 'ħ'), + (0x10796, 'M', 'ʜ'), + (0x10797, 'M', 'ɧ'), + (0x10798, 'M', 'ʄ'), + (0x10799, 'M', 'ʪ'), + (0x1079A, 'M', 'ʫ'), + (0x1079B, 'M', 'ɬ'), + (0x1079C, 'M', '𝼄'), + (0x1079D, 'M', 'ꞎ'), + (0x1079E, 'M', 'ɮ'), + (0x1079F, 'M', '𝼅'), + (0x107A0, 'M', 'ʎ'), + (0x107A1, 'M', '𝼆'), + (0x107A2, 'M', 'ø'), + (0x107A3, 'M', 'ɶ'), + (0x107A4, 'M', 'ɷ'), + (0x107A5, 'M', 'q'), + (0x107A6, 'M', 'ɺ'), + (0x107A7, 'M', '𝼈'), + (0x107A8, 'M', 'ɽ'), + (0x107A9, 'M', 'ɾ'), + (0x107AA, 'M', 'ʀ'), + (0x107AB, 'M', 'ʨ'), + (0x107AC, 'M', 'ʦ'), + (0x107AD, 'M', 'ꭧ'), + (0x107AE, 'M', 'ʧ'), + (0x107AF, 'M', 'ʈ'), + (0x107B0, 'M', 'ⱱ'), + (0x107B1, 'X'), + (0x107B2, 'M', 'ʏ'), + (0x107B3, 'M', 'ʡ'), + (0x107B4, 'M', 'ʢ'), + (0x107B5, 'M', 'ʘ'), + (0x107B6, 'M', 'ǀ'), + (0x107B7, 'M', 'ǁ'), + (0x107B8, 'M', 'ǂ'), + (0x107B9, 'M', '𝼊'), + (0x107BA, 'M', '𝼞'), + (0x107BB, 'X'), (0x10800, 'V'), (0x10806, 'X'), (0x10808, 'V'), @@ -5707,6 +5830,10 @@ def _seg_54(): (0x10A60, 'V'), (0x10AA0, 'X'), (0x10AC0, 'V'), + ] + +def _seg_56() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x10AE7, 'X'), (0x10AEB, 'V'), (0x10AF7, 'X'), @@ -5722,10 +5849,6 @@ def _seg_54(): (0x10B9D, 'X'), (0x10BA9, 'V'), (0x10BB0, 'X'), - ] - -def _seg_55(): - return [ (0x10C00, 'V'), (0x10C49, 'X'), (0x10C80, 'M', '𐳀'), @@ -5794,10 +5917,12 @@ def _seg_55(): (0x10EAE, 'X'), (0x10EB0, 'V'), (0x10EB2, 'X'), - (0x10F00, 'V'), + (0x10EFD, 'V'), (0x10F28, 'X'), (0x10F30, 'V'), (0x10F5A, 'X'), + (0x10F70, 'V'), + (0x10F8A, 'X'), (0x10FB0, 'V'), (0x10FCC, 'X'), (0x10FE0, 'V'), @@ -5805,11 +5930,15 @@ def _seg_55(): (0x11000, 'V'), (0x1104E, 'X'), (0x11052, 'V'), - (0x11070, 'X'), + (0x11076, 
'X'), (0x1107F, 'V'), (0x110BD, 'X'), (0x110BE, 'V'), - (0x110C2, 'X'), + ] + +def _seg_57() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x110C3, 'X'), (0x110D0, 'V'), (0x110E9, 'X'), (0x110F0, 'V'), @@ -5826,12 +5955,8 @@ def _seg_55(): (0x111F5, 'X'), (0x11200, 'V'), (0x11212, 'X'), - ] - -def _seg_56(): - return [ (0x11213, 'V'), - (0x1123F, 'X'), + (0x11242, 'X'), (0x11280, 'V'), (0x11287, 'X'), (0x11288, 'V'), @@ -5895,7 +6020,7 @@ def _seg_56(): (0x11660, 'V'), (0x1166D, 'X'), (0x11680, 'V'), - (0x116B9, 'X'), + (0x116BA, 'X'), (0x116C0, 'V'), (0x116CA, 'X'), (0x11700, 'V'), @@ -5903,7 +6028,7 @@ def _seg_56(): (0x1171D, 'V'), (0x1172C, 'X'), (0x11730, 'V'), - (0x11740, 'X'), + (0x11747, 'X'), (0x11800, 'V'), (0x1183C, 'X'), (0x118A0, 'M', '𑣀'), @@ -5913,6 +6038,10 @@ def _seg_56(): (0x118A4, 'M', '𑣄'), (0x118A5, 'M', '𑣅'), (0x118A6, 'M', '𑣆'), + ] + +def _seg_58() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x118A7, 'M', '𑣇'), (0x118A8, 'M', '𑣈'), (0x118A9, 'M', '𑣉'), @@ -5930,10 +6059,6 @@ def _seg_56(): (0x118B5, 'M', '𑣕'), (0x118B6, 'M', '𑣖'), (0x118B7, 'M', '𑣗'), - ] - -def _seg_57(): - return [ (0x118B8, 'M', '𑣘'), (0x118B9, 'M', '𑣙'), (0x118BA, 'M', '𑣚'), @@ -5970,8 +6095,10 @@ def _seg_57(): (0x11A48, 'X'), (0x11A50, 'V'), (0x11AA3, 'X'), - (0x11AC0, 'V'), + (0x11AB0, 'V'), (0x11AF9, 'X'), + (0x11B00, 'V'), + (0x11B0A, 'X'), (0x11C00, 'V'), (0x11C09, 'X'), (0x11C0A, 'V'), @@ -6014,6 +6141,16 @@ def _seg_57(): (0x11DAA, 'X'), (0x11EE0, 'V'), (0x11EF9, 'X'), + (0x11F00, 'V'), + ] + +def _seg_59() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x11F11, 'X'), + (0x11F12, 'V'), + (0x11F3B, 'X'), + (0x11F3E, 'V'), + (0x11F5A, 'X'), (0x11FB0, 'V'), (0x11FB1, 'X'), (0x11FC0, 'V'), @@ -6026,22 +6163,24 @@ def _seg_57(): (0x12475, 'X'), (0x12480, 'V'), (0x12544, 'X'), + (0x12F90, 'V'), + (0x12FF3, 'X'), (0x13000, 'V'), - (0x1342F, 'X'), + (0x13430, 'X'), + (0x13440, 'V'), + (0x13456, 'X'), (0x14400, 'V'), (0x14647, 'X'), (0x16800, 'V'), (0x16A39, 'X'), (0x16A40, 'V'), (0x16A5F, 'X'), - ] - -def _seg_58(): - return [ (0x16A60, 'V'), (0x16A6A, 'X'), (0x16A6E, 'V'), - (0x16A70, 'X'), + (0x16ABF, 'X'), + (0x16AC0, 'V'), + (0x16ACA, 'X'), (0x16AD0, 'V'), (0x16AEE, 'X'), (0x16AF0, 'V'), @@ -6106,10 +6245,24 @@ def _seg_58(): (0x18CD6, 'X'), (0x18D00, 'V'), (0x18D09, 'X'), + (0x1AFF0, 'V'), + ] + +def _seg_60() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1AFF4, 'X'), + (0x1AFF5, 'V'), + (0x1AFFC, 'X'), + (0x1AFFD, 'V'), + (0x1AFFF, 'X'), (0x1B000, 'V'), - (0x1B11F, 'X'), + (0x1B123, 'X'), + (0x1B132, 'V'), + (0x1B133, 'X'), (0x1B150, 'V'), (0x1B153, 'X'), + (0x1B155, 'V'), + (0x1B156, 'X'), (0x1B164, 'V'), (0x1B168, 'X'), (0x1B170, 'V'), @@ -6125,6 +6278,12 @@ def _seg_58(): (0x1BC9C, 'V'), (0x1BCA0, 'I'), (0x1BCA4, 'X'), + (0x1CF00, 'V'), + (0x1CF2E, 'X'), + (0x1CF30, 'V'), + (0x1CF47, 'X'), + (0x1CF50, 'V'), + (0x1CFC4, 'X'), (0x1D000, 'V'), (0x1D0F6, 'X'), (0x1D100, 'V'), @@ -6138,10 +6297,6 @@ def _seg_58(): (0x1D163, 'M', '𝅘𝅥𝅱'), (0x1D164, 'M', '𝅘𝅥𝅲'), (0x1D165, 'V'), - ] - -def _seg_59(): - return [ (0x1D173, 'X'), (0x1D17B, 'V'), (0x1D1BB, 'M', '𝆹𝅥'), @@ -6151,9 +6306,11 @@ def _seg_59(): (0x1D1BF, 'M', '𝆹𝅥𝅯'), (0x1D1C0, 'M', '𝆺𝅥𝅯'), (0x1D1C1, 'V'), - (0x1D1E9, 'X'), + (0x1D1EB, 'X'), (0x1D200, 'V'), (0x1D246, 'X'), + (0x1D2C0, 'V'), + (0x1D2D4, 'X'), (0x1D2E0, 'V'), (0x1D2F4, 'X'), (0x1D300, 'V'), @@ -6193,6 +6350,10 @@ def _seg_59(): (0x1D41E, 'M', 'e'), (0x1D41F, 'M', 'f'), (0x1D420, 
'M', 'g'), + ] + +def _seg_61() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1D421, 'M', 'h'), (0x1D422, 'M', 'i'), (0x1D423, 'M', 'j'), @@ -6242,10 +6403,6 @@ def _seg_59(): (0x1D44F, 'M', 'b'), (0x1D450, 'M', 'c'), (0x1D451, 'M', 'd'), - ] - -def _seg_60(): - return [ (0x1D452, 'M', 'e'), (0x1D453, 'M', 'f'), (0x1D454, 'M', 'g'), @@ -6297,6 +6454,10 @@ def _seg_60(): (0x1D482, 'M', 'a'), (0x1D483, 'M', 'b'), (0x1D484, 'M', 'c'), + ] + +def _seg_62() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1D485, 'M', 'd'), (0x1D486, 'M', 'e'), (0x1D487, 'M', 'f'), @@ -6346,10 +6507,6 @@ def _seg_60(): (0x1D4B6, 'M', 'a'), (0x1D4B7, 'M', 'b'), (0x1D4B8, 'M', 'c'), - ] - -def _seg_61(): - return [ (0x1D4B9, 'M', 'd'), (0x1D4BA, 'X'), (0x1D4BB, 'M', 'f'), @@ -6401,6 +6558,10 @@ def _seg_61(): (0x1D4E9, 'M', 'z'), (0x1D4EA, 'M', 'a'), (0x1D4EB, 'M', 'b'), + ] + +def _seg_63() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1D4EC, 'M', 'c'), (0x1D4ED, 'M', 'd'), (0x1D4EE, 'M', 'e'), @@ -6450,10 +6611,6 @@ def _seg_61(): (0x1D51B, 'M', 'x'), (0x1D51C, 'M', 'y'), (0x1D51D, 'X'), - ] - -def _seg_62(): - return [ (0x1D51E, 'M', 'a'), (0x1D51F, 'M', 'b'), (0x1D520, 'M', 'c'), @@ -6505,6 +6662,10 @@ def _seg_62(): (0x1D550, 'M', 'y'), (0x1D551, 'X'), (0x1D552, 'M', 'a'), + ] + +def _seg_64() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1D553, 'M', 'b'), (0x1D554, 'M', 'c'), (0x1D555, 'M', 'd'), @@ -6554,10 +6715,6 @@ def _seg_62(): (0x1D581, 'M', 'v'), (0x1D582, 'M', 'w'), (0x1D583, 'M', 'x'), - ] - -def _seg_63(): - return [ (0x1D584, 'M', 'y'), (0x1D585, 'M', 'z'), (0x1D586, 'M', 'a'), @@ -6609,6 +6766,10 @@ def _seg_63(): (0x1D5B4, 'M', 'u'), (0x1D5B5, 'M', 'v'), (0x1D5B6, 'M', 'w'), + ] + +def _seg_65() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1D5B7, 'M', 'x'), (0x1D5B8, 'M', 'y'), (0x1D5B9, 'M', 'z'), @@ -6658,10 +6819,6 @@ def _seg_63(): (0x1D5E5, 'M', 'r'), (0x1D5E6, 'M', 's'), (0x1D5E7, 'M', 't'), - ] - -def _seg_64(): - return [ (0x1D5E8, 'M', 'u'), (0x1D5E9, 'M', 'v'), (0x1D5EA, 'M', 'w'), @@ -6713,6 +6870,10 @@ def _seg_64(): (0x1D618, 'M', 'q'), (0x1D619, 'M', 'r'), (0x1D61A, 'M', 's'), + ] + +def _seg_66() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1D61B, 'M', 't'), (0x1D61C, 'M', 'u'), (0x1D61D, 'M', 'v'), @@ -6762,10 +6923,6 @@ def _seg_64(): (0x1D649, 'M', 'n'), (0x1D64A, 'M', 'o'), (0x1D64B, 'M', 'p'), - ] - -def _seg_65(): - return [ (0x1D64C, 'M', 'q'), (0x1D64D, 'M', 'r'), (0x1D64E, 'M', 's'), @@ -6817,6 +6974,10 @@ def _seg_65(): (0x1D67C, 'M', 'm'), (0x1D67D, 'M', 'n'), (0x1D67E, 'M', 'o'), + ] + +def _seg_67() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1D67F, 'M', 'p'), (0x1D680, 'M', 'q'), (0x1D681, 'M', 'r'), @@ -6866,10 +7027,6 @@ def _seg_65(): (0x1D6AE, 'M', 'η'), (0x1D6AF, 'M', 'θ'), (0x1D6B0, 'M', 'ι'), - ] - -def _seg_66(): - return [ (0x1D6B1, 'M', 'κ'), (0x1D6B2, 'M', 'λ'), (0x1D6B3, 'M', 'μ'), @@ -6921,6 +7078,10 @@ def _seg_66(): (0x1D6E2, 'M', 'α'), (0x1D6E3, 'M', 'β'), (0x1D6E4, 'M', 'γ'), + ] + +def _seg_68() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1D6E5, 'M', 'δ'), (0x1D6E6, 'M', 'ε'), (0x1D6E7, 'M', 'ζ'), @@ -6970,10 +7131,6 @@ def _seg_66(): (0x1D714, 'M', 'ω'), (0x1D715, 'M', '∂'), (0x1D716, 'M', 'ε'), - ] - -def _seg_67(): - return [ (0x1D717, 'M', 'θ'), (0x1D718, 'M', 'κ'), (0x1D719, 'M', 'φ'), @@ -7025,6 +7182,10 @@ def _seg_67(): (0x1D747, 'M', 'σ'), (0x1D749, 'M', 
'τ'), (0x1D74A, 'M', 'υ'), + ] + +def _seg_69() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1D74B, 'M', 'φ'), (0x1D74C, 'M', 'χ'), (0x1D74D, 'M', 'ψ'), @@ -7074,10 +7235,6 @@ def _seg_67(): (0x1D779, 'M', 'κ'), (0x1D77A, 'M', 'λ'), (0x1D77B, 'M', 'μ'), - ] - -def _seg_68(): - return [ (0x1D77C, 'M', 'ν'), (0x1D77D, 'M', 'ξ'), (0x1D77E, 'M', 'ο'), @@ -7129,6 +7286,10 @@ def _seg_68(): (0x1D7AD, 'M', 'δ'), (0x1D7AE, 'M', 'ε'), (0x1D7AF, 'M', 'ζ'), + ] + +def _seg_70() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1D7B0, 'M', 'η'), (0x1D7B1, 'M', 'θ'), (0x1D7B2, 'M', 'ι'), @@ -7178,10 +7339,6 @@ def _seg_68(): (0x1D7E1, 'M', '9'), (0x1D7E2, 'M', '0'), (0x1D7E3, 'M', '1'), - ] - -def _seg_69(): - return [ (0x1D7E4, 'M', '2'), (0x1D7E5, 'M', '3'), (0x1D7E6, 'M', '4'), @@ -7216,6 +7373,10 @@ def _seg_69(): (0x1DAA0, 'X'), (0x1DAA1, 'V'), (0x1DAB0, 'X'), + (0x1DF00, 'V'), + (0x1DF1F, 'X'), + (0x1DF25, 'V'), + (0x1DF2B, 'X'), (0x1E000, 'V'), (0x1E007, 'X'), (0x1E008, 'V'), @@ -7226,6 +7387,75 @@ def _seg_69(): (0x1E025, 'X'), (0x1E026, 'V'), (0x1E02B, 'X'), + (0x1E030, 'M', 'а'), + (0x1E031, 'M', 'б'), + (0x1E032, 'M', 'в'), + ] + +def _seg_71() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1E033, 'M', 'г'), + (0x1E034, 'M', 'д'), + (0x1E035, 'M', 'е'), + (0x1E036, 'M', 'ж'), + (0x1E037, 'M', 'з'), + (0x1E038, 'M', 'и'), + (0x1E039, 'M', 'к'), + (0x1E03A, 'M', 'л'), + (0x1E03B, 'M', 'м'), + (0x1E03C, 'M', 'о'), + (0x1E03D, 'M', 'п'), + (0x1E03E, 'M', 'р'), + (0x1E03F, 'M', 'с'), + (0x1E040, 'M', 'т'), + (0x1E041, 'M', 'у'), + (0x1E042, 'M', 'ф'), + (0x1E043, 'M', 'х'), + (0x1E044, 'M', 'ц'), + (0x1E045, 'M', 'ч'), + (0x1E046, 'M', 'ш'), + (0x1E047, 'M', 'ы'), + (0x1E048, 'M', 'э'), + (0x1E049, 'M', 'ю'), + (0x1E04A, 'M', 'ꚉ'), + (0x1E04B, 'M', 'ә'), + (0x1E04C, 'M', 'і'), + (0x1E04D, 'M', 'ј'), + (0x1E04E, 'M', 'ө'), + (0x1E04F, 'M', 'ү'), + (0x1E050, 'M', 'ӏ'), + (0x1E051, 'M', 'а'), + (0x1E052, 'M', 'б'), + (0x1E053, 'M', 'в'), + (0x1E054, 'M', 'г'), + (0x1E055, 'M', 'д'), + (0x1E056, 'M', 'е'), + (0x1E057, 'M', 'ж'), + (0x1E058, 'M', 'з'), + (0x1E059, 'M', 'и'), + (0x1E05A, 'M', 'к'), + (0x1E05B, 'M', 'л'), + (0x1E05C, 'M', 'о'), + (0x1E05D, 'M', 'п'), + (0x1E05E, 'M', 'с'), + (0x1E05F, 'M', 'у'), + (0x1E060, 'M', 'ф'), + (0x1E061, 'M', 'х'), + (0x1E062, 'M', 'ц'), + (0x1E063, 'M', 'ч'), + (0x1E064, 'M', 'ш'), + (0x1E065, 'M', 'ъ'), + (0x1E066, 'M', 'ы'), + (0x1E067, 'M', 'ґ'), + (0x1E068, 'M', 'і'), + (0x1E069, 'M', 'ѕ'), + (0x1E06A, 'M', 'џ'), + (0x1E06B, 'M', 'ҫ'), + (0x1E06C, 'M', 'ꙑ'), + (0x1E06D, 'M', 'ұ'), + (0x1E06E, 'X'), + (0x1E08F, 'V'), + (0x1E090, 'X'), (0x1E100, 'V'), (0x1E12D, 'X'), (0x1E130, 'V'), @@ -7234,10 +7464,22 @@ def _seg_69(): (0x1E14A, 'X'), (0x1E14E, 'V'), (0x1E150, 'X'), + (0x1E290, 'V'), + (0x1E2AF, 'X'), (0x1E2C0, 'V'), (0x1E2FA, 'X'), (0x1E2FF, 'V'), (0x1E300, 'X'), + (0x1E4D0, 'V'), + (0x1E4FA, 'X'), + (0x1E7E0, 'V'), + (0x1E7E7, 'X'), + (0x1E7E8, 'V'), + (0x1E7EC, 'X'), + (0x1E7ED, 'V'), + (0x1E7EF, 'X'), + (0x1E7F0, 'V'), + (0x1E7FF, 'X'), (0x1E800, 'V'), (0x1E8C5, 'X'), (0x1E8C7, 'V'), @@ -7252,6 +7494,10 @@ def _seg_69(): (0x1E907, 'M', '𞤩'), (0x1E908, 'M', '𞤪'), (0x1E909, 'M', '𞤫'), + ] + +def _seg_72() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1E90A, 'M', '𞤬'), (0x1E90B, 'M', '𞤭'), (0x1E90C, 'M', '𞤮'), @@ -7282,10 +7528,6 @@ def _seg_69(): (0x1E95A, 'X'), (0x1E95E, 'V'), (0x1E960, 'X'), - ] - -def _seg_70(): - return [ (0x1EC71, 'V'), (0x1ECB5, 'X'), 
(0x1ED01, 'V'), @@ -7356,6 +7598,10 @@ def _seg_70(): (0x1EE48, 'X'), (0x1EE49, 'M', 'ي'), (0x1EE4A, 'X'), + ] + +def _seg_73() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1EE4B, 'M', 'ل'), (0x1EE4C, 'X'), (0x1EE4D, 'M', 'ن'), @@ -7386,10 +7632,6 @@ def _seg_70(): (0x1EE68, 'M', 'ط'), (0x1EE69, 'M', 'ي'), (0x1EE6A, 'M', 'ك'), - ] - -def _seg_71(): - return [ (0x1EE6B, 'X'), (0x1EE6C, 'M', 'م'), (0x1EE6D, 'M', 'ن'), @@ -7460,6 +7702,10 @@ def _seg_71(): (0x1EEB2, 'M', 'ق'), (0x1EEB3, 'M', 'ر'), (0x1EEB4, 'M', 'ش'), + ] + +def _seg_74() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1EEB5, 'M', 'ت'), (0x1EEB6, 'M', 'ث'), (0x1EEB7, 'M', 'خ'), @@ -7490,10 +7736,6 @@ def _seg_71(): (0x1F106, '3', '5,'), (0x1F107, '3', '6,'), (0x1F108, '3', '7,'), - ] - -def _seg_72(): - return [ (0x1F109, '3', '8,'), (0x1F10A, '3', '9,'), (0x1F10B, 'V'), @@ -7564,6 +7806,10 @@ def _seg_72(): (0x1F150, 'V'), (0x1F16A, 'M', 'mc'), (0x1F16B, 'M', 'md'), + ] + +def _seg_75() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1F16C, 'M', 'mr'), (0x1F16D, 'V'), (0x1F190, 'M', 'dj'), @@ -7594,10 +7840,6 @@ def _seg_72(): (0x1F221, 'M', '終'), (0x1F222, 'M', '生'), (0x1F223, 'M', '販'), - ] - -def _seg_73(): - return [ (0x1F224, 'M', '声'), (0x1F225, 'M', '吹'), (0x1F226, 'M', '演'), @@ -7640,16 +7882,18 @@ def _seg_73(): (0x1F266, 'X'), (0x1F300, 'V'), (0x1F6D8, 'X'), - (0x1F6E0, 'V'), + (0x1F6DC, 'V'), (0x1F6ED, 'X'), (0x1F6F0, 'V'), (0x1F6FD, 'X'), (0x1F700, 'V'), - (0x1F774, 'X'), - (0x1F780, 'V'), - (0x1F7D9, 'X'), + (0x1F777, 'X'), + (0x1F77B, 'V'), + (0x1F7DA, 'X'), (0x1F7E0, 'V'), (0x1F7EC, 'X'), + (0x1F7F0, 'V'), + (0x1F7F1, 'X'), (0x1F800, 'V'), (0x1F80C, 'X'), (0x1F810, 'V'), @@ -7663,27 +7907,27 @@ def _seg_73(): (0x1F8B0, 'V'), (0x1F8B2, 'X'), (0x1F900, 'V'), - (0x1F979, 'X'), - (0x1F97A, 'V'), - (0x1F9CC, 'X'), - (0x1F9CD, 'V'), (0x1FA54, 'X'), (0x1FA60, 'V'), (0x1FA6E, 'X'), + ] + +def _seg_76() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x1FA70, 'V'), - (0x1FA75, 'X'), - (0x1FA78, 'V'), - (0x1FA7B, 'X'), + (0x1FA7D, 'X'), (0x1FA80, 'V'), - (0x1FA87, 'X'), + (0x1FA89, 'X'), (0x1FA90, 'V'), - (0x1FAA9, 'X'), - (0x1FAB0, 'V'), - (0x1FAB7, 'X'), - (0x1FAC0, 'V'), - (0x1FAC3, 'X'), - (0x1FAD0, 'V'), - (0x1FAD7, 'X'), + (0x1FABE, 'X'), + (0x1FABF, 'V'), + (0x1FAC6, 'X'), + (0x1FACE, 'V'), + (0x1FADC, 'X'), + (0x1FAE0, 'V'), + (0x1FAE9, 'X'), + (0x1FAF0, 'V'), + (0x1FAF9, 'X'), (0x1FB00, 'V'), (0x1FB93, 'X'), (0x1FB94, 'V'), @@ -7698,15 +7942,11 @@ def _seg_73(): (0x1FBF7, 'M', '7'), (0x1FBF8, 'M', '8'), (0x1FBF9, 'M', '9'), - ] - -def _seg_74(): - return [ (0x1FBFA, 'X'), (0x20000, 'V'), - (0x2A6DE, 'X'), + (0x2A6E0, 'X'), (0x2A700, 'V'), - (0x2B735, 'X'), + (0x2B73A, 'X'), (0x2B740, 'V'), (0x2B81E, 'X'), (0x2B820, 'V'), @@ -7774,6 +8014,10 @@ def _seg_74(): (0x2F83C, 'M', '咞'), (0x2F83D, 'M', '吸'), (0x2F83E, 'M', '呈'), + ] + +def _seg_77() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x2F83F, 'M', '周'), (0x2F840, 'M', '咢'), (0x2F841, 'M', '哶'), @@ -7802,10 +8046,6 @@ def _seg_74(): (0x2F859, 'M', '𡓤'), (0x2F85A, 'M', '売'), (0x2F85B, 'M', '壷'), - ] - -def _seg_75(): - return [ (0x2F85C, 'M', '夆'), (0x2F85D, 'M', '多'), (0x2F85E, 'M', '夢'), @@ -7878,6 +8118,10 @@ def _seg_75(): (0x2F8A4, 'M', '𢛔'), (0x2F8A5, 'M', '惇'), (0x2F8A6, 'M', '慈'), + ] + +def _seg_78() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x2F8A7, 'M', '慌'), (0x2F8A8, 'M', '慎'), (0x2F8A9, 'M', '慌'), @@ -7906,10 +8150,6 
@@ def _seg_75(): (0x2F8C0, 'M', '揅'), (0x2F8C1, 'M', '掩'), (0x2F8C2, 'M', '㨮'), - ] - -def _seg_76(): - return [ (0x2F8C3, 'M', '摩'), (0x2F8C4, 'M', '摾'), (0x2F8C5, 'M', '撝'), @@ -7982,6 +8222,10 @@ def _seg_76(): (0x2F908, 'M', '港'), (0x2F909, 'M', '湮'), (0x2F90A, 'M', '㴳'), + ] + +def _seg_79() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x2F90B, 'M', '滋'), (0x2F90C, 'M', '滇'), (0x2F90D, 'M', '𣻑'), @@ -8010,10 +8254,6 @@ def _seg_76(): (0x2F924, 'M', '犀'), (0x2F925, 'M', '犕'), (0x2F926, 'M', '𤜵'), - ] - -def _seg_77(): - return [ (0x2F927, 'M', '𤠔'), (0x2F928, 'M', '獺'), (0x2F929, 'M', '王'), @@ -8086,6 +8326,10 @@ def _seg_77(): (0x2F96F, 'M', '縂'), (0x2F970, 'M', '繅'), (0x2F971, 'M', '䌴'), + ] + +def _seg_80() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x2F972, 'M', '𦈨'), (0x2F973, 'M', '𦉇'), (0x2F974, 'M', '䍙'), @@ -8114,10 +8358,6 @@ def _seg_77(): (0x2F98B, 'M', '舁'), (0x2F98C, 'M', '舄'), (0x2F98D, 'M', '辞'), - ] - -def _seg_78(): - return [ (0x2F98E, 'M', '䑫'), (0x2F98F, 'M', '芑'), (0x2F990, 'M', '芋'), @@ -8190,6 +8430,10 @@ def _seg_78(): (0x2F9D3, 'M', '𧲨'), (0x2F9D4, 'M', '貫'), (0x2F9D5, 'M', '賁'), + ] + +def _seg_81() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ (0x2F9D6, 'M', '贛'), (0x2F9D7, 'M', '起'), (0x2F9D8, 'M', '𧼯'), @@ -8218,10 +8462,6 @@ def _seg_78(): (0x2F9EF, 'M', '䦕'), (0x2F9F0, 'M', '閷'), (0x2F9F1, 'M', '𨵷'), - ] - -def _seg_79(): - return [ (0x2F9F2, 'M', '䧦'), (0x2F9F3, 'M', '雃'), (0x2F9F4, 'M', '嶲'), @@ -8268,6 +8508,8 @@ def _seg_79(): (0x2FA1E, 'X'), (0x30000, 'V'), (0x3134B, 'X'), + (0x31350, 'V'), + (0x323B0, 'X'), (0xE0100, 'I'), (0xE01F0, 'X'), ] @@ -8353,4 +8595,6 @@ uts46data = tuple( + _seg_77() + _seg_78() + _seg_79() -) + + _seg_80() + + _seg_81() +) # type: Tuple[Union[Tuple[int, str], Tuple[int, str, str]], ...] 
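Editor's note: the uts46data.py hunks above renumber idna's _seg_NN helpers (the new Unicode rows shift every later segment up by two) and add List[Union[Tuple[int, str], Tuple[int, str, str]]] return annotations. The mapping table is split across many small functions on purpose: each returns one bounded slice, and the slices are concatenated into a single tuple at import time, keeping any one function's constant table small. Below is a minimal sketch of that pattern with a toy two-segment table; the row values and the lookup helper are illustrative, not part of idna's API.

import bisect
from typing import List, Tuple, Union

Row = Union[Tuple[int, str], Tuple[int, str, str]]

def _seg_0() -> List[Row]:
    # one bounded slice of the table per helper function
    return [(0x41, 'M', 'a'), (0x42, 'M', 'b')]

def _seg_1() -> List[Row]:
    return [(0x43, 'M', 'c'), (0x44, 'V')]

# concatenated once at import time, exactly like uts46data above
toy_data: Tuple[Row, ...] = tuple(_seg_0() + _seg_1())
_starts = [row[0] for row in toy_data]

def lookup(cp: int) -> Row:
    # rows are sorted by starting code point; pick the row governing cp
    return toy_data[bisect.bisect_right(_starts, cp) - 1]

print(lookup(0x43))  # (67, 'M', 'c')
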
diff --git a/env/Lib/site-packages/pip/_vendor/msgpack/__init__.py b/env/Lib/site-packages/pip/_vendor/msgpack/__init__.py index d6705e22b79dd53f3896bee29eb1e2f12bf106eb..1300b866043e22e3b318ba791d31333ca8fe8514 100644 --- a/env/Lib/site-packages/pip/_vendor/msgpack/__init__.py +++ b/env/Lib/site-packages/pip/_vendor/msgpack/__init__.py @@ -1,5 +1,4 @@ # coding: utf-8 -from ._version import version from .exceptions import * from .ext import ExtType, Timestamp @@ -7,6 +6,10 @@ import os import sys +version = (1, 0, 5) +__version__ = "1.0.5" + + if os.environ.get("MSGPACK_PUREPYTHON") or sys.version_info[0] == 2: from .fallback import Packer, unpackb, Unpacker else: diff --git a/env/Lib/site-packages/pip/_vendor/msgpack/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/msgpack/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index add4afe4f30d9337b32aa61801fc95913ccfcd1b..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/msgpack/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/msgpack/__pycache__/_version.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/msgpack/__pycache__/_version.cpython-39.pyc deleted file mode 100644 index 66be0c12af734ae3353ff772c42306861e6ea582..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/msgpack/__pycache__/_version.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/msgpack/__pycache__/exceptions.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/msgpack/__pycache__/exceptions.cpython-39.pyc deleted file mode 100644 index db4c9815e830afe6373dd977e3050f29038a7de9..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/msgpack/__pycache__/exceptions.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/msgpack/__pycache__/ext.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/msgpack/__pycache__/ext.cpython-39.pyc deleted file mode 100644 index 8044173388082122097ef9782479206aeb36172b..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/msgpack/__pycache__/ext.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/msgpack/__pycache__/fallback.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/msgpack/__pycache__/fallback.cpython-39.pyc deleted file mode 100644 index 8f9a25be63168dd71d7fbebd50724904a01f0e92..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/msgpack/__pycache__/fallback.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/msgpack/_version.py b/env/Lib/site-packages/pip/_vendor/msgpack/_version.py deleted file mode 100644 index 1c83c8ed3d618cd68e774a30be6737da1dda8b15..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/msgpack/_version.py +++ /dev/null @@ -1 +0,0 @@ -version = (1, 0, 2) diff --git a/env/Lib/site-packages/pip/_vendor/msgpack/ext.py b/env/Lib/site-packages/pip/_vendor/msgpack/ext.py index 4eb9dd65adc9aff07547f5ef7541bdf2be91124a..23e0d6b41ce6a36a2bc1a9657ff68aeb99d8b32f 100644 --- a/env/Lib/site-packages/pip/_vendor/msgpack/ext.py +++ b/env/Lib/site-packages/pip/_vendor/msgpack/ext.py @@ -56,10 +56,10 @@ class Timestamp(object): Note: Negative times (before the UNIX epoch) are represented as negative seconds + positive ns. 
""" if not isinstance(seconds, int_types): - raise TypeError("seconds must be an interger") + raise TypeError("seconds must be an integer") if not isinstance(nanoseconds, int_types): raise TypeError("nanoseconds must be an integer") - if not (0 <= nanoseconds < 10 ** 9): + if not (0 <= nanoseconds < 10**9): raise ValueError( "nanoseconds must be a non-negative integer less than 999999999." ) @@ -143,7 +143,7 @@ class Timestamp(object): :type unix_float: int or float. """ seconds = int(unix_sec // 1) - nanoseconds = int((unix_sec % 1) * 10 ** 9) + nanoseconds = int((unix_sec % 1) * 10**9) return Timestamp(seconds, nanoseconds) def to_unix(self): @@ -161,7 +161,7 @@ class Timestamp(object): :param int unix_ns: Posix timestamp in nanoseconds. :rtype: Timestamp """ - return Timestamp(*divmod(unix_ns, 10 ** 9)) + return Timestamp(*divmod(unix_ns, 10**9)) def to_unix_nano(self): """Get the timestamp as a unixtime in nanoseconds. @@ -169,7 +169,7 @@ class Timestamp(object): :returns: posix timestamp in nanoseconds :rtype: int """ - return self.seconds * 10 ** 9 + self.nanoseconds + return self.seconds * 10**9 + self.nanoseconds def to_datetime(self): """Get the timestamp as a UTC datetime. diff --git a/env/Lib/site-packages/pip/_vendor/msgpack/fallback.py b/env/Lib/site-packages/pip/_vendor/msgpack/fallback.py index 0bfa94eacb6ae5cfacf395f13511413dd6f18ad4..e8cebc1bef7870a5e772ea066c485eddf5c1c57c 100644 --- a/env/Lib/site-packages/pip/_vendor/msgpack/fallback.py +++ b/env/Lib/site-packages/pip/_vendor/msgpack/fallback.py @@ -1,5 +1,4 @@ """Fallback pure Python implementation of msgpack""" - from datetime import datetime as _DateTime import sys import struct @@ -12,7 +11,6 @@ if PY2: def dict_iteritems(d): return d.iteritems() - else: int_types = int unicode = str @@ -33,7 +31,6 @@ if sys.version_info < (3, 5): and e.args[0].startswith("maximum recursion depth exceeded") ) - else: def _is_recursionerror(e): @@ -69,7 +66,6 @@ if hasattr(sys, "pypy_version_info"): def getvalue(self): return self.builder.build() - else: USING_STRINGBUILDER = False from io import BytesIO as StringIO @@ -144,10 +140,41 @@ if sys.version_info < (2, 7, 6): """Explicit type cast for legacy struct.unpack_from""" return struct.unpack_from(f, bytes(b), o) - else: _unpack_from = struct.unpack_from +_NO_FORMAT_USED = "" +_MSGPACK_HEADERS = { + 0xC4: (1, _NO_FORMAT_USED, TYPE_BIN), + 0xC5: (2, ">H", TYPE_BIN), + 0xC6: (4, ">I", TYPE_BIN), + 0xC7: (2, "Bb", TYPE_EXT), + 0xC8: (3, ">Hb", TYPE_EXT), + 0xC9: (5, ">Ib", TYPE_EXT), + 0xCA: (4, ">f"), + 0xCB: (8, ">d"), + 0xCC: (1, _NO_FORMAT_USED), + 0xCD: (2, ">H"), + 0xCE: (4, ">I"), + 0xCF: (8, ">Q"), + 0xD0: (1, "b"), + 0xD1: (2, ">h"), + 0xD2: (4, ">i"), + 0xD3: (8, ">q"), + 0xD4: (1, "b1s", TYPE_EXT), + 0xD5: (2, "b2s", TYPE_EXT), + 0xD6: (4, "b4s", TYPE_EXT), + 0xD7: (8, "b8s", TYPE_EXT), + 0xD8: (16, "b16s", TYPE_EXT), + 0xD9: (1, _NO_FORMAT_USED, TYPE_RAW), + 0xDA: (2, ">H", TYPE_RAW), + 0xDB: (4, ">I", TYPE_RAW), + 0xDC: (2, ">H", TYPE_ARRAY), + 0xDD: (4, ">I", TYPE_ARRAY), + 0xDE: (2, ">H", TYPE_MAP), + 0xDF: (4, ">I", TYPE_MAP), +} + class Unpacker(object): """Streaming unpacker. 
@@ -229,7 +256,7 @@ class Unpacker(object): Example of streaming deserialize from socket:: - unpacker = Unpacker(max_buffer_size) + unpacker = Unpacker() while True: buf = sock.recv(1024**2) if not buf: @@ -291,7 +318,7 @@ class Unpacker(object): self._buf_checkpoint = 0 if not max_buffer_size: - max_buffer_size = 2 ** 31 - 1 + max_buffer_size = 2**31 - 1 if max_str_len == -1: max_str_len = max_buffer_size if max_bin_len == -1: @@ -354,7 +381,7 @@ class Unpacker(object): self._buffer.extend(view) def _consume(self): - """ Gets rid of the used parts of the buffer. """ + """Gets rid of the used parts of the buffer.""" self._stream_offset += self._buff_i - self._buf_checkpoint self._buf_checkpoint = self._buff_i @@ -396,6 +423,8 @@ class Unpacker(object): # Read from file remain_bytes = -remain_bytes + if remain_bytes + len(self._buffer) > self._max_buffer_size: + raise BufferFull while remain_bytes > 0: to_read_bytes = max(self._read_size, remain_bytes) read_data = self.file_like.read(to_read_bytes) @@ -409,7 +438,7 @@ class Unpacker(object): self._buff_i = 0 # rollback raise OutOfData - def _read_header(self, execute=EX_CONSTRUCT): + def _read_header(self): typ = TYPE_IMMEDIATE n = 0 obj = None @@ -424,205 +453,95 @@ class Unpacker(object): n = b & 0b00011111 typ = TYPE_RAW if n > self._max_str_len: - raise ValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) + raise ValueError("%s exceeds max_str_len(%s)" % (n, self._max_str_len)) obj = self._read(n) elif b & 0b11110000 == 0b10010000: n = b & 0b00001111 typ = TYPE_ARRAY if n > self._max_array_len: - raise ValueError("%s exceeds max_array_len(%s)", n, self._max_array_len) + raise ValueError( + "%s exceeds max_array_len(%s)" % (n, self._max_array_len) + ) elif b & 0b11110000 == 0b10000000: n = b & 0b00001111 typ = TYPE_MAP if n > self._max_map_len: - raise ValueError("%s exceeds max_map_len(%s)", n, self._max_map_len) + raise ValueError("%s exceeds max_map_len(%s)" % (n, self._max_map_len)) elif b == 0xC0: obj = None elif b == 0xC2: obj = False elif b == 0xC3: obj = True - elif b == 0xC4: - typ = TYPE_BIN - self._reserve(1) - n = self._buffer[self._buff_i] - self._buff_i += 1 - if n > self._max_bin_len: - raise ValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len)) - obj = self._read(n) - elif b == 0xC5: - typ = TYPE_BIN - self._reserve(2) - n = _unpack_from(">H", self._buffer, self._buff_i)[0] - self._buff_i += 2 - if n > self._max_bin_len: - raise ValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len)) - obj = self._read(n) - elif b == 0xC6: - typ = TYPE_BIN - self._reserve(4) - n = _unpack_from(">I", self._buffer, self._buff_i)[0] - self._buff_i += 4 + elif 0xC4 <= b <= 0xC6: + size, fmt, typ = _MSGPACK_HEADERS[b] + self._reserve(size) + if len(fmt) > 0: + n = _unpack_from(fmt, self._buffer, self._buff_i)[0] + else: + n = self._buffer[self._buff_i] + self._buff_i += size if n > self._max_bin_len: raise ValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len)) obj = self._read(n) - elif b == 0xC7: # ext 8 - typ = TYPE_EXT - self._reserve(2) - L, n = _unpack_from("Bb", self._buffer, self._buff_i) - self._buff_i += 2 + elif 0xC7 <= b <= 0xC9: + size, fmt, typ = _MSGPACK_HEADERS[b] + self._reserve(size) + L, n = _unpack_from(fmt, self._buffer, self._buff_i) + self._buff_i += size if L > self._max_ext_len: raise ValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len)) obj = self._read(L) - elif b == 0xC8: # ext 16 - typ = TYPE_EXT - self._reserve(3) - L, n = _unpack_from(">Hb", self._buffer, 
self._buff_i) - self._buff_i += 3 - if L > self._max_ext_len: - raise ValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len)) - obj = self._read(L) - elif b == 0xC9: # ext 32 - typ = TYPE_EXT - self._reserve(5) - L, n = _unpack_from(">Ib", self._buffer, self._buff_i) - self._buff_i += 5 - if L > self._max_ext_len: - raise ValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len)) - obj = self._read(L) - elif b == 0xCA: - self._reserve(4) - obj = _unpack_from(">f", self._buffer, self._buff_i)[0] - self._buff_i += 4 - elif b == 0xCB: - self._reserve(8) - obj = _unpack_from(">d", self._buffer, self._buff_i)[0] - self._buff_i += 8 - elif b == 0xCC: - self._reserve(1) - obj = self._buffer[self._buff_i] - self._buff_i += 1 - elif b == 0xCD: - self._reserve(2) - obj = _unpack_from(">H", self._buffer, self._buff_i)[0] - self._buff_i += 2 - elif b == 0xCE: - self._reserve(4) - obj = _unpack_from(">I", self._buffer, self._buff_i)[0] - self._buff_i += 4 - elif b == 0xCF: - self._reserve(8) - obj = _unpack_from(">Q", self._buffer, self._buff_i)[0] - self._buff_i += 8 - elif b == 0xD0: - self._reserve(1) - obj = _unpack_from("b", self._buffer, self._buff_i)[0] - self._buff_i += 1 - elif b == 0xD1: - self._reserve(2) - obj = _unpack_from(">h", self._buffer, self._buff_i)[0] - self._buff_i += 2 - elif b == 0xD2: - self._reserve(4) - obj = _unpack_from(">i", self._buffer, self._buff_i)[0] - self._buff_i += 4 - elif b == 0xD3: - self._reserve(8) - obj = _unpack_from(">q", self._buffer, self._buff_i)[0] - self._buff_i += 8 - elif b == 0xD4: # fixext 1 - typ = TYPE_EXT - if self._max_ext_len < 1: - raise ValueError("%s exceeds max_ext_len(%s)" % (1, self._max_ext_len)) - self._reserve(2) - n, obj = _unpack_from("b1s", self._buffer, self._buff_i) - self._buff_i += 2 - elif b == 0xD5: # fixext 2 - typ = TYPE_EXT - if self._max_ext_len < 2: - raise ValueError("%s exceeds max_ext_len(%s)" % (2, self._max_ext_len)) - self._reserve(3) - n, obj = _unpack_from("b2s", self._buffer, self._buff_i) - self._buff_i += 3 - elif b == 0xD6: # fixext 4 - typ = TYPE_EXT - if self._max_ext_len < 4: - raise ValueError("%s exceeds max_ext_len(%s)" % (4, self._max_ext_len)) - self._reserve(5) - n, obj = _unpack_from("b4s", self._buffer, self._buff_i) - self._buff_i += 5 - elif b == 0xD7: # fixext 8 - typ = TYPE_EXT - if self._max_ext_len < 8: - raise ValueError("%s exceeds max_ext_len(%s)" % (8, self._max_ext_len)) - self._reserve(9) - n, obj = _unpack_from("b8s", self._buffer, self._buff_i) - self._buff_i += 9 - elif b == 0xD8: # fixext 16 - typ = TYPE_EXT - if self._max_ext_len < 16: - raise ValueError("%s exceeds max_ext_len(%s)" % (16, self._max_ext_len)) - self._reserve(17) - n, obj = _unpack_from("b16s", self._buffer, self._buff_i) - self._buff_i += 17 - elif b == 0xD9: - typ = TYPE_RAW - self._reserve(1) - n = self._buffer[self._buff_i] - self._buff_i += 1 - if n > self._max_str_len: - raise ValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) - obj = self._read(n) - elif b == 0xDA: - typ = TYPE_RAW - self._reserve(2) - (n,) = _unpack_from(">H", self._buffer, self._buff_i) - self._buff_i += 2 - if n > self._max_str_len: - raise ValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) - obj = self._read(n) - elif b == 0xDB: - typ = TYPE_RAW - self._reserve(4) - (n,) = _unpack_from(">I", self._buffer, self._buff_i) - self._buff_i += 4 + elif 0xCA <= b <= 0xD3: + size, fmt = _MSGPACK_HEADERS[b] + self._reserve(size) + if len(fmt) > 0: + obj = _unpack_from(fmt, self._buffer, self._buff_i)[0] + 
else: + obj = self._buffer[self._buff_i] + self._buff_i += size + elif 0xD4 <= b <= 0xD8: + size, fmt, typ = _MSGPACK_HEADERS[b] + if self._max_ext_len < size: + raise ValueError( + "%s exceeds max_ext_len(%s)" % (size, self._max_ext_len) + ) + self._reserve(size + 1) + n, obj = _unpack_from(fmt, self._buffer, self._buff_i) + self._buff_i += size + 1 + elif 0xD9 <= b <= 0xDB: + size, fmt, typ = _MSGPACK_HEADERS[b] + self._reserve(size) + if len(fmt) > 0: + (n,) = _unpack_from(fmt, self._buffer, self._buff_i) + else: + n = self._buffer[self._buff_i] + self._buff_i += size if n > self._max_str_len: - raise ValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) + raise ValueError("%s exceeds max_str_len(%s)" % (n, self._max_str_len)) obj = self._read(n) - elif b == 0xDC: - typ = TYPE_ARRAY - self._reserve(2) - (n,) = _unpack_from(">H", self._buffer, self._buff_i) - self._buff_i += 2 + elif 0xDC <= b <= 0xDD: + size, fmt, typ = _MSGPACK_HEADERS[b] + self._reserve(size) + (n,) = _unpack_from(fmt, self._buffer, self._buff_i) + self._buff_i += size if n > self._max_array_len: - raise ValueError("%s exceeds max_array_len(%s)", n, self._max_array_len) - elif b == 0xDD: - typ = TYPE_ARRAY - self._reserve(4) - (n,) = _unpack_from(">I", self._buffer, self._buff_i) - self._buff_i += 4 - if n > self._max_array_len: - raise ValueError("%s exceeds max_array_len(%s)", n, self._max_array_len) - elif b == 0xDE: - self._reserve(2) - (n,) = _unpack_from(">H", self._buffer, self._buff_i) - self._buff_i += 2 - if n > self._max_map_len: - raise ValueError("%s exceeds max_map_len(%s)", n, self._max_map_len) - typ = TYPE_MAP - elif b == 0xDF: - self._reserve(4) - (n,) = _unpack_from(">I", self._buffer, self._buff_i) - self._buff_i += 4 + raise ValueError( + "%s exceeds max_array_len(%s)" % (n, self._max_array_len) + ) + elif 0xDE <= b <= 0xDF: + size, fmt, typ = _MSGPACK_HEADERS[b] + self._reserve(size) + (n,) = _unpack_from(fmt, self._buffer, self._buff_i) + self._buff_i += size if n > self._max_map_len: - raise ValueError("%s exceeds max_map_len(%s)", n, self._max_map_len) - typ = TYPE_MAP + raise ValueError("%s exceeds max_map_len(%s)" % (n, self._max_map_len)) else: raise FormatError("Unknown header: 0x%x" % b) return typ, n, obj def _unpack(self, execute=EX_CONSTRUCT): - typ, n, obj = self._read_header(execute) + typ, n, obj = self._read_header() if execute == EX_READ_ARRAY_HEADER: if typ != TYPE_ARRAY: @@ -883,20 +802,20 @@ class Packer(object): raise OverflowError("Integer value out of range") if check(obj, (bytes, bytearray)): n = len(obj) - if n >= 2 ** 32: + if n >= 2**32: raise ValueError("%s is too large" % type(obj).__name__) self._pack_bin_header(n) return self._buffer.write(obj) if check(obj, unicode): obj = obj.encode("utf-8", self._unicode_errors) n = len(obj) - if n >= 2 ** 32: + if n >= 2**32: raise ValueError("String is too large") self._pack_raw_header(n) return self._buffer.write(obj) if check(obj, memoryview): - n = len(obj) * obj.itemsize - if n >= 2 ** 32: + n = obj.nbytes + if n >= 2**32: raise ValueError("Memoryview is too large") self._pack_bin_header(n) return self._buffer.write(obj) @@ -953,6 +872,10 @@ class Packer(object): obj = self._default(obj) default_used = 1 continue + + if self._datetime and check(obj, _DateTime): + raise ValueError("Cannot serialize %r where tzinfo=None" % (obj,)) + raise TypeError("Cannot serialize %r" % (obj,)) def pack(self, obj): @@ -974,7 +897,7 @@ class Packer(object): return ret def pack_array_header(self, n): - if n >= 2 ** 32: + if n >= 
2**32: raise ValueError self._pack_array_header(n) if self._autoreset: @@ -983,7 +906,7 @@ class Packer(object): return ret def pack_map_header(self, n): - if n >= 2 ** 32: + if n >= 2**32: raise ValueError self._pack_map_header(n) if self._autoreset: diff --git a/env/Lib/site-packages/pip/_vendor/packaging/__about__.py b/env/Lib/site-packages/pip/_vendor/packaging/__about__.py index 4c43a968c8adc99637a446c5c7619a43ceceb0df..3551bc2d29846441299cf57b397b02fc164c99b9 100644 --- a/env/Lib/site-packages/pip/_vendor/packaging/__about__.py +++ b/env/Lib/site-packages/pip/_vendor/packaging/__about__.py @@ -1,7 +1,6 @@ # This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. -from __future__ import absolute_import, division, print_function __all__ = [ "__title__", @@ -18,7 +17,7 @@ __title__ = "packaging" __summary__ = "Core utilities for Python packages" __uri__ = "https://github.com/pypa/packaging" -__version__ = "20.9" +__version__ = "21.3" __author__ = "Donald Stufft and individual contributors" __email__ = "donald@stufft.io" diff --git a/env/Lib/site-packages/pip/_vendor/packaging/__init__.py b/env/Lib/site-packages/pip/_vendor/packaging/__init__.py index a0cf67df5245be16a020ca048832e180f7ce8661..3c50c5dcfeeda2efed282200a5c5cc8c5f7542f7 100644 --- a/env/Lib/site-packages/pip/_vendor/packaging/__init__.py +++ b/env/Lib/site-packages/pip/_vendor/packaging/__init__.py @@ -1,7 +1,6 @@ # This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. -from __future__ import absolute_import, division, print_function from .__about__ import ( __author__, diff --git a/env/Lib/site-packages/pip/_vendor/packaging/__pycache__/__about__.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/packaging/__pycache__/__about__.cpython-39.pyc deleted file mode 100644 index fecdb66800010426132b32b6f20e103bfe7cc6e0..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/packaging/__pycache__/__about__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/packaging/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/packaging/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 200d3c409305fe272db30a88f5ce1adce2408cd1..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/packaging/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/packaging/__pycache__/_compat.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/packaging/__pycache__/_compat.cpython-39.pyc deleted file mode 100644 index 435f600ff43a89ee86ef147b255333fefc321460..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/packaging/__pycache__/_compat.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/packaging/__pycache__/_structures.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/packaging/__pycache__/_structures.cpython-39.pyc deleted file mode 100644 index 58a941566764676660502683697a93452f777b43..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/packaging/__pycache__/_structures.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/packaging/__pycache__/_typing.cpython-39.pyc 
b/env/Lib/site-packages/pip/_vendor/packaging/__pycache__/_typing.cpython-39.pyc deleted file mode 100644 index 5c88f89de500b5c0b72e8e3320665be133bada84..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/packaging/__pycache__/_typing.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/packaging/__pycache__/markers.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/packaging/__pycache__/markers.cpython-39.pyc deleted file mode 100644 index 0480d34e188df2c3536d4830f9aeb47d6a187009..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/packaging/__pycache__/markers.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/packaging/__pycache__/requirements.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/packaging/__pycache__/requirements.cpython-39.pyc deleted file mode 100644 index d42056248ced5aafe8868c86c2fe5e45cb488297..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/packaging/__pycache__/requirements.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/packaging/__pycache__/specifiers.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/packaging/__pycache__/specifiers.cpython-39.pyc deleted file mode 100644 index 86b3f711973562313491c819d34b7fbd22dfd96e..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/packaging/__pycache__/specifiers.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/packaging/__pycache__/tags.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/packaging/__pycache__/tags.cpython-39.pyc deleted file mode 100644 index 0bfdb1a79a6c8ce95d6d2cb22df79fa9a1c8a885..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/packaging/__pycache__/tags.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/packaging/__pycache__/utils.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/packaging/__pycache__/utils.cpython-39.pyc deleted file mode 100644 index 09d01046c240055f8b96ebd19e5a7fb6041595b6..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/packaging/__pycache__/utils.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/packaging/__pycache__/version.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/packaging/__pycache__/version.cpython-39.pyc deleted file mode 100644 index f4437ce827b023287d76e6ede6dfebe88eb07b47..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/packaging/__pycache__/version.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/packaging/_compat.py b/env/Lib/site-packages/pip/_vendor/packaging/_compat.py deleted file mode 100644 index e54bd4ede8761df5882a3354bc22bdee7a5e8a8b..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/packaging/_compat.py +++ /dev/null @@ -1,38 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. 
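Editor's note: _compat.py, deleted here, carried the Python 2 shims; its only non-trivial export was with_metaclass (its body follows in the deletion below), which builds a throwaway metaclass so one class statement works under both Python 2 and 3. With Python 2 dropped, callers switch to the plain Python 3 metaclass= keyword, as the specifiers.py hunks further down show. A two-line equivalence sketch, assuming abc.ABCMeta as the metaclass:

import abc

# Python 3 spelling used throughout this diff:
class Modern(metaclass=abc.ABCMeta):
    pass

# what the deleted helper expressed in 2/3-compatible form:
#     class Legacy(with_metaclass(abc.ABCMeta, object)): ...

print(type(Modern))  # <class 'abc.ABCMeta'>
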
-from __future__ import absolute_import, division, print_function - -import sys - -from ._typing import TYPE_CHECKING - -if TYPE_CHECKING: # pragma: no cover - from typing import Any, Dict, Tuple, Type - - -PY2 = sys.version_info[0] == 2 -PY3 = sys.version_info[0] == 3 - -# flake8: noqa - -if PY3: - string_types = (str,) -else: - string_types = (basestring,) - - -def with_metaclass(meta, *bases): - # type: (Type[Any], Tuple[Type[Any], ...]) -> Any - """ - Create a base class with a metaclass. - """ - # This requires a bit of explanation: the basic idea is to make a dummy - # metaclass for one level of class instantiation that replaces itself with - # the actual metaclass. - class metaclass(meta): # type: ignore - def __new__(cls, name, this_bases, d): - # type: (Type[Any], str, Tuple[Any], Dict[Any, Any]) -> Any - return meta(name, bases, d) - - return type.__new__(metaclass, "temporary_class", (), {}) diff --git a/env/Lib/site-packages/pip/_vendor/packaging/_structures.py b/env/Lib/site-packages/pip/_vendor/packaging/_structures.py index 800d5c5588c99dc216cdea5084da440efb641945..90a6465f9682c886363eea5327dac64bf623a6ff 100644 --- a/env/Lib/site-packages/pip/_vendor/packaging/_structures.py +++ b/env/Lib/site-packages/pip/_vendor/packaging/_structures.py @@ -1,85 +1,60 @@ # This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. -from __future__ import absolute_import, division, print_function -class InfinityType(object): - def __repr__(self): - # type: () -> str +class InfinityType: + def __repr__(self) -> str: return "Infinity" - def __hash__(self): - # type: () -> int + def __hash__(self) -> int: return hash(repr(self)) - def __lt__(self, other): - # type: (object) -> bool + def __lt__(self, other: object) -> bool: return False - def __le__(self, other): - # type: (object) -> bool + def __le__(self, other: object) -> bool: return False - def __eq__(self, other): - # type: (object) -> bool + def __eq__(self, other: object) -> bool: return isinstance(other, self.__class__) - def __ne__(self, other): - # type: (object) -> bool - return not isinstance(other, self.__class__) - - def __gt__(self, other): - # type: (object) -> bool + def __gt__(self, other: object) -> bool: return True - def __ge__(self, other): - # type: (object) -> bool + def __ge__(self, other: object) -> bool: return True - def __neg__(self): - # type: (object) -> NegativeInfinityType + def __neg__(self: object) -> "NegativeInfinityType": return NegativeInfinity Infinity = InfinityType() -class NegativeInfinityType(object): - def __repr__(self): - # type: () -> str +class NegativeInfinityType: + def __repr__(self) -> str: return "-Infinity" - def __hash__(self): - # type: () -> int + def __hash__(self) -> int: return hash(repr(self)) - def __lt__(self, other): - # type: (object) -> bool + def __lt__(self, other: object) -> bool: return True - def __le__(self, other): - # type: (object) -> bool + def __le__(self, other: object) -> bool: return True - def __eq__(self, other): - # type: (object) -> bool + def __eq__(self, other: object) -> bool: return isinstance(other, self.__class__) - def __ne__(self, other): - # type: (object) -> bool - return not isinstance(other, self.__class__) - - def __gt__(self, other): - # type: (object) -> bool + def __gt__(self, other: object) -> bool: return False - def __ge__(self, other): - # type: (object) -> bool + def __ge__(self, other: object) -> bool: return False - 
def __neg__(self): - # type: (object) -> InfinityType + def __neg__(self: object) -> InfinityType: return Infinity diff --git a/env/Lib/site-packages/pip/_vendor/packaging/_typing.py b/env/Lib/site-packages/pip/_vendor/packaging/_typing.py deleted file mode 100644 index 2846133bd8da3106a82e1b5f990fac8fca1c00b7..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/packaging/_typing.py +++ /dev/null @@ -1,48 +0,0 @@ -"""For neatly implementing static typing in packaging. - -`mypy` - the static type analysis tool we use - uses the `typing` module, which -provides core functionality fundamental to mypy's functioning. - -Generally, `typing` would be imported at runtime and used in that fashion - -it acts as a no-op at runtime and does not have any run-time overhead by -design. - -As it turns out, `typing` is not vendorable - it uses separate sources for -Python 2/Python 3. Thus, this codebase can not expect it to be present. -To work around this, mypy allows the typing import to be behind a False-y -optional to prevent it from running at runtime and type-comments can be used -to remove the need for the types to be accessible directly during runtime. - -This module provides the False-y guard in a nicely named fashion so that a -curious maintainer can reach here to read this. - -In packaging, all static-typing related imports should be guarded as follows: - - from pip._vendor.packaging._typing import TYPE_CHECKING - - if TYPE_CHECKING: - from typing import ... - -Ref: https://github.com/python/mypy/issues/3216 -""" - -__all__ = ["TYPE_CHECKING", "cast"] - -# The TYPE_CHECKING constant defined by the typing module is False at runtime -# but True while type checking. -if False: # pragma: no cover - from typing import TYPE_CHECKING -else: - TYPE_CHECKING = False - -# typing's cast syntax requires calling typing.cast at runtime, but we don't -# want to import typing at runtime. Here, we inform the type checkers that -# we're importing `typing.cast` as `cast` and re-implement typing.cast's -# runtime behavior in a block that is ignored by type checkers. -if TYPE_CHECKING: # pragma: no cover - # not executed at runtime - from typing import cast -else: - # executed at runtime - def cast(type_, value): # noqa - return value diff --git a/env/Lib/site-packages/pip/_vendor/packaging/markers.py b/env/Lib/site-packages/pip/_vendor/packaging/markers.py index 69a60cf1e95625d564cd0aa7e9b2c9b7cd809a27..540e7a4dc79d02a820e291b57c43335d5aa25a41 100644 --- a/env/Lib/site-packages/pip/_vendor/packaging/markers.py +++ b/env/Lib/site-packages/pip/_vendor/packaging/markers.py @@ -1,12 +1,12 @@ # This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. 
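Editor's note: _typing.py, removed just above, existed only because a vendorable typing module could not be assumed at runtime under Python 2; it faked TYPE_CHECKING as a constant False and re-implemented cast as an identity function. On Python 3 the real module is always importable, so markers.py below simply does from typing import ... at module level. The guard the shim emulated remains useful for imports needed only by the type checker; a small sketch (the Callable import and apply function are illustrative):

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # executed by type checkers only; zero cost at runtime
    from collections.abc import Callable

def apply(fn: "Callable[[int], int]", x: int) -> int:
    return fn(x)

print(apply(lambda n: n + 1, 41))  # 42
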
-from __future__ import absolute_import, division, print_function import operator import os import platform import sys +from typing import Any, Callable, Dict, List, Optional, Tuple, Union from pip._vendor.pyparsing import ( # noqa: N817 Forward, @@ -20,16 +20,8 @@ from pip._vendor.pyparsing import ( # noqa: N817 stringStart, ) -from ._compat import string_types -from ._typing import TYPE_CHECKING from .specifiers import InvalidSpecifier, Specifier -if TYPE_CHECKING: # pragma: no cover - from typing import Any, Callable, Dict, List, Optional, Tuple, Union - - Operator = Callable[[str, str], bool] - - __all__ = [ "InvalidMarker", "UndefinedComparison", @@ -38,6 +30,8 @@ __all__ = [ "default_environment", ] +Operator = Callable[[str, str], bool] + class InvalidMarker(ValueError): """ @@ -58,39 +52,32 @@ class UndefinedEnvironmentName(ValueError): """ -class Node(object): - def __init__(self, value): - # type: (Any) -> None +class Node: + def __init__(self, value: Any) -> None: self.value = value - def __str__(self): - # type: () -> str + def __str__(self) -> str: return str(self.value) - def __repr__(self): - # type: () -> str - return "<{0}({1!r})>".format(self.__class__.__name__, str(self)) + def __repr__(self) -> str: + return f"<{self.__class__.__name__}('{self}')>" - def serialize(self): - # type: () -> str + def serialize(self) -> str: raise NotImplementedError class Variable(Node): - def serialize(self): - # type: () -> str + def serialize(self) -> str: return str(self) class Value(Node): - def serialize(self): - # type: () -> str - return '"{0}"'.format(self) + def serialize(self) -> str: + return f'"{self}"' class Op(Node): - def serialize(self): - # type: () -> str + def serialize(self) -> str: return str(self) @@ -151,18 +138,18 @@ MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR) MARKER = stringStart + MARKER_EXPR + stringEnd -def _coerce_parse_result(results): - # type: (Union[ParseResults, List[Any]]) -> List[Any] +def _coerce_parse_result(results: Union[ParseResults, List[Any]]) -> List[Any]: if isinstance(results, ParseResults): return [_coerce_parse_result(i) for i in results] else: return results -def _format_marker(marker, first=True): - # type: (Union[List[str], Tuple[Node, ...], str], Optional[bool]) -> str +def _format_marker( + marker: Union[List[str], Tuple[Node, ...], str], first: Optional[bool] = True +) -> str: - assert isinstance(marker, (list, tuple, string_types)) + assert isinstance(marker, (list, tuple, str)) # Sometimes we have a structure like [[...]] which is a single item list # where the single item is itself it's own list. 
In that case we want skip @@ -187,7 +174,7 @@ def _format_marker(marker, first=True): return marker -_operators = { +_operators: Dict[str, Operator] = { "in": lambda lhs, rhs: lhs in rhs, "not in": lambda lhs, rhs: lhs not in rhs, "<": operator.lt, @@ -196,11 +183,10 @@ _operators = { "!=": operator.ne, ">=": operator.ge, ">": operator.gt, -} # type: Dict[str, Operator] +} -def _eval_op(lhs, op, rhs): - # type: (str, Op, str) -> bool +def _eval_op(lhs: str, op: Op, rhs: str) -> bool: try: spec = Specifier("".join([op.serialize(), rhs])) except InvalidSpecifier: @@ -208,40 +194,36 @@ def _eval_op(lhs, op, rhs): else: return spec.contains(lhs) - oper = _operators.get(op.serialize()) # type: Optional[Operator] + oper: Optional[Operator] = _operators.get(op.serialize()) if oper is None: - raise UndefinedComparison( - "Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs) - ) + raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.") return oper(lhs, rhs) -class Undefined(object): +class Undefined: pass _undefined = Undefined() -def _get_env(environment, name): - # type: (Dict[str, str], str) -> str - value = environment.get(name, _undefined) # type: Union[str, Undefined] +def _get_env(environment: Dict[str, str], name: str) -> str: + value: Union[str, Undefined] = environment.get(name, _undefined) if isinstance(value, Undefined): raise UndefinedEnvironmentName( - "{0!r} does not exist in evaluation environment.".format(name) + f"{name!r} does not exist in evaluation environment." ) return value -def _evaluate_markers(markers, environment): - # type: (List[Any], Dict[str, str]) -> bool - groups = [[]] # type: List[List[bool]] +def _evaluate_markers(markers: List[Any], environment: Dict[str, str]) -> bool: + groups: List[List[bool]] = [[]] for marker in markers: - assert isinstance(marker, (list, tuple, string_types)) + assert isinstance(marker, (list, tuple, str)) if isinstance(marker, list): groups[-1].append(_evaluate_markers(marker, environment)) @@ -264,8 +246,7 @@ def _evaluate_markers(markers, environment): return any(all(item) for item in groups) -def format_full_version(info): - # type: (sys._version_info) -> str +def format_full_version(info: "sys._version_info") -> str: version = "{0.major}.{0.minor}.{0.micro}".format(info) kind = info.releaselevel if kind != "final": @@ -273,18 +254,9 @@ def format_full_version(info): return version -def default_environment(): - # type: () -> Dict[str, str] - if hasattr(sys, "implementation"): - # Ignoring the `sys.implementation` reference for type checking due to - # mypy not liking that the attribute doesn't exist in Python 2.7 when - # run with the `--py27` flag. 
- iver = format_full_version(sys.implementation.version) # type: ignore - implementation_name = sys.implementation.name # type: ignore - else: - iver = "0" - implementation_name = "" - +def default_environment() -> Dict[str, str]: + iver = format_full_version(sys.implementation.version) + implementation_name = sys.implementation.name return { "implementation_name": implementation_name, "implementation_version": iver, @@ -300,27 +272,23 @@ def default_environment(): } -class Marker(object): - def __init__(self, marker): - # type: (str) -> None +class Marker: + def __init__(self, marker: str) -> None: try: self._markers = _coerce_parse_result(MARKER.parseString(marker)) except ParseException as e: - err_str = "Invalid marker: {0!r}, parse error at {1!r}".format( - marker, marker[e.loc : e.loc + 8] + raise InvalidMarker( + f"Invalid marker: {marker!r}, parse error at " + f"{marker[e.loc : e.loc + 8]!r}" ) - raise InvalidMarker(err_str) - def __str__(self): - # type: () -> str + def __str__(self) -> str: return _format_marker(self._markers) - def __repr__(self): - # type: () -> str - return "<Marker({0!r})>".format(str(self)) + def __repr__(self) -> str: + return f"<Marker('{self}')>" - def evaluate(self, environment=None): - # type: (Optional[Dict[str, str]]) -> bool + def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool: """Evaluate a marker. Return the boolean from evaluating the given marker against the diff --git a/env/Lib/site-packages/pip/_vendor/packaging/requirements.py b/env/Lib/site-packages/pip/_vendor/packaging/requirements.py index c2a7fdacd593db77110860d33b75fcb566a61375..1eab7dd66d9bfdefea1a0e159303f1c09fa16d67 100644 --- a/env/Lib/site-packages/pip/_vendor/packaging/requirements.py +++ b/env/Lib/site-packages/pip/_vendor/packaging/requirements.py @@ -1,13 +1,13 @@ # This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. -from __future__ import absolute_import, division, print_function import re import string -import sys +import urllib.parse +from typing import List, Optional as TOptional, Set -from pip._vendor.pyparsing import ( # noqa: N817 +from pip._vendor.pyparsing import ( # noqa Combine, Literal as L, Optional, @@ -20,19 +20,9 @@ from pip._vendor.pyparsing import ( # noqa: N817 stringStart, ) -from ._typing import TYPE_CHECKING from .markers import MARKER_EXPR, Marker from .specifiers import LegacySpecifier, Specifier, SpecifierSet -if sys.version_info[0] >= 3: - from urllib import parse as urlparse # pragma: no cover -else: # pragma: no cover - import urlparse - - -if TYPE_CHECKING: # pragma: no cover - from typing import List, Optional as TOptional, Set - class InvalidRequirement(ValueError): """ @@ -70,7 +60,7 @@ VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY VERSION_MANY = Combine( VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False )("_raw_spec") -_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY)) +_VERSION_SPEC = Optional((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY) _VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "") VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier") @@ -94,7 +84,7 @@ REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd REQUIREMENT.parseString("x[]") -class Requirement(object): +class Requirement: """Parse a requirement. 
Parse a given requirement string into its parts, such as name, specifier, @@ -107,54 +97,50 @@ class Requirement(object): # the thing as well as the version? What about the markers? # TODO: Can we normalize the name and extra name? - def __init__(self, requirement_string): - # type: (str) -> None + def __init__(self, requirement_string: str) -> None: try: req = REQUIREMENT.parseString(requirement_string) except ParseException as e: raise InvalidRequirement( - 'Parse error at "{0!r}": {1}'.format( - requirement_string[e.loc : e.loc + 8], e.msg - ) + f'Parse error at "{ requirement_string[e.loc : e.loc + 8]!r}": {e.msg}' ) - self.name = req.name # type: str + self.name: str = req.name if req.url: - parsed_url = urlparse.urlparse(req.url) + parsed_url = urllib.parse.urlparse(req.url) if parsed_url.scheme == "file": - if urlparse.urlunparse(parsed_url) != req.url: + if urllib.parse.urlunparse(parsed_url) != req.url: raise InvalidRequirement("Invalid URL given") elif not (parsed_url.scheme and parsed_url.netloc) or ( not parsed_url.scheme and not parsed_url.netloc ): - raise InvalidRequirement("Invalid URL: {0}".format(req.url)) - self.url = req.url # type: TOptional[str] + raise InvalidRequirement(f"Invalid URL: {req.url}") + self.url: TOptional[str] = req.url else: self.url = None - self.extras = set(req.extras.asList() if req.extras else []) # type: Set[str] - self.specifier = SpecifierSet(req.specifier) # type: SpecifierSet - self.marker = req.marker if req.marker else None # type: TOptional[Marker] + self.extras: Set[str] = set(req.extras.asList() if req.extras else []) + self.specifier: SpecifierSet = SpecifierSet(req.specifier) + self.marker: TOptional[Marker] = req.marker if req.marker else None - def __str__(self): - # type: () -> str - parts = [self.name] # type: List[str] + def __str__(self) -> str: + parts: List[str] = [self.name] if self.extras: - parts.append("[{0}]".format(",".join(sorted(self.extras)))) + formatted_extras = ",".join(sorted(self.extras)) + parts.append(f"[{formatted_extras}]") if self.specifier: parts.append(str(self.specifier)) if self.url: - parts.append("@ {0}".format(self.url)) + parts.append(f"@ {self.url}") if self.marker: parts.append(" ") if self.marker: - parts.append("; {0}".format(self.marker)) + parts.append(f"; {self.marker}") return "".join(parts) - def __repr__(self): - # type: () -> str - return "<Requirement({0!r})>".format(str(self)) + def __repr__(self) -> str: + return f"<Requirement('{self}')>" diff --git a/env/Lib/site-packages/pip/_vendor/packaging/specifiers.py b/env/Lib/site-packages/pip/_vendor/packaging/specifiers.py index a6a83c1fe9393272bcfa2f28c7b11e5bfe77a66a..0e218a6f9f75ea2060a8b08d1f1a043fdad68df8 100644 --- a/env/Lib/site-packages/pip/_vendor/packaging/specifiers.py +++ b/env/Lib/site-packages/pip/_vendor/packaging/specifiers.py @@ -1,25 +1,33 @@ # This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. 
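Editor's note: the Requirement rewrite above is behavior-preserving — Python 3's urllib.parse replaces the 2/3 urlparse shuffle, type comments become annotations, and .format() calls become f-strings — so the public surface is unchanged. A usage sketch against the upstream packaging distribution (the vendored copy under pip._vendor is not meant to be imported directly); the requirement string is illustrative:

from packaging.requirements import Requirement

req = Requirement('requests[security]>=2.8.1,==2.8.*; python_version < "2.7"')
print(req.name)            # requests
print(sorted(req.extras))  # ['security']
print(str(req.specifier))  # ==2.8.*,>=2.8.1
print(str(req.marker))     # python_version < "2.7"
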
-from __future__ import absolute_import, division, print_function import abc import functools import itertools import re import warnings +from typing import ( + Callable, + Dict, + Iterable, + Iterator, + List, + Optional, + Pattern, + Set, + Tuple, + TypeVar, + Union, +) -from ._compat import string_types, with_metaclass -from ._typing import TYPE_CHECKING from .utils import canonicalize_version from .version import LegacyVersion, Version, parse -if TYPE_CHECKING: # pragma: no cover - from typing import Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union - - ParsedVersion = Union[Version, LegacyVersion] - UnparsedVersion = Union[Version, LegacyVersion, str] - CallableOperator = Callable[[ParsedVersion, str], bool] +ParsedVersion = Union[Version, LegacyVersion] +UnparsedVersion = Union[Version, LegacyVersion, str] +VersionTypeVar = TypeVar("VersionTypeVar", bound=UnparsedVersion) +CallableOperator = Callable[[ParsedVersion, str], bool] class InvalidSpecifier(ValueError): @@ -28,64 +36,51 @@ class InvalidSpecifier(ValueError): """ -class BaseSpecifier(with_metaclass(abc.ABCMeta, object)): # type: ignore +class BaseSpecifier(metaclass=abc.ABCMeta): @abc.abstractmethod - def __str__(self): - # type: () -> str + def __str__(self) -> str: """ Returns the str representation of this Specifier like object. This should be representative of the Specifier itself. """ @abc.abstractmethod - def __hash__(self): - # type: () -> int + def __hash__(self) -> int: """ Returns a hash value for this Specifier like object. """ @abc.abstractmethod - def __eq__(self, other): - # type: (object) -> bool + def __eq__(self, other: object) -> bool: """ Returns a boolean representing whether or not the two Specifier like objects are equal. """ - @abc.abstractmethod - def __ne__(self, other): - # type: (object) -> bool - """ - Returns a boolean representing whether or not the two Specifier like - objects are not equal. - """ - @abc.abstractproperty - def prereleases(self): - # type: () -> Optional[bool] + def prereleases(self) -> Optional[bool]: """ Returns whether or not pre-releases as a whole are allowed by this specifier. """ @prereleases.setter - def prereleases(self, value): - # type: (bool) -> None + def prereleases(self, value: bool) -> None: """ Sets whether or not pre-releases as a whole are allowed by this specifier. """ @abc.abstractmethod - def contains(self, item, prereleases=None): - # type: (str, Optional[bool]) -> bool + def contains(self, item: str, prereleases: Optional[bool] = None) -> bool: """ Determines if the given item is contained within this specifier. """ @abc.abstractmethod - def filter(self, iterable, prereleases=None): - # type: (Iterable[UnparsedVersion], Optional[bool]) -> Iterable[UnparsedVersion] + def filter( + self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None + ) -> Iterable[VersionTypeVar]: """ Takes an iterable of items and filters them so that only items which are contained within this specifier are allowed in it. 
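Editor's note: the __ne__ deletions above (and earlier in _structures.py) look lossy but are not: Python 3 derives __ne__ as the negation of __eq__ whenever __eq__ returns anything other than NotImplemented, so the explicit implementations were dead weight. A minimal demonstration with a hypothetical class:

class Key:
    def __init__(self, value: int) -> None:
        self.value = value

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Key):
            return NotImplemented
        return self.value == other.value

# no __ne__ defined: Python 3 negates __eq__ automatically
print(Key(1) != Key(2))  # True
print(Key(1) != Key(1))  # False
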
@@ -94,48 +89,43 @@ class BaseSpecifier(with_metaclass(abc.ABCMeta, object)): # type: ignore class _IndividualSpecifier(BaseSpecifier): - _operators = {} # type: Dict[str, str] + _operators: Dict[str, str] = {} + _regex: Pattern[str] - def __init__(self, spec="", prereleases=None): - # type: (str, Optional[bool]) -> None + def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None: match = self._regex.search(spec) if not match: - raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec)) + raise InvalidSpecifier(f"Invalid specifier: '{spec}'") - self._spec = ( + self._spec: Tuple[str, str] = ( match.group("operator").strip(), match.group("version").strip(), - ) # type: Tuple[str, str] + ) # Store whether or not this Specifier should accept prereleases self._prereleases = prereleases - def __repr__(self): - # type: () -> str + def __repr__(self) -> str: pre = ( - ", prereleases={0!r}".format(self.prereleases) + f", prereleases={self.prereleases!r}" if self._prereleases is not None else "" ) - return "<{0}({1!r}{2})>".format(self.__class__.__name__, str(self), pre) + return f"<{self.__class__.__name__}({str(self)!r}{pre})>" - def __str__(self): - # type: () -> str - return "{0}{1}".format(*self._spec) + def __str__(self) -> str: + return "{}{}".format(*self._spec) @property - def _canonical_spec(self): - # type: () -> Tuple[str, Union[Version, str]] + def _canonical_spec(self) -> Tuple[str, str]: return self._spec[0], canonicalize_version(self._spec[1]) - def __hash__(self): - # type: () -> int + def __hash__(self) -> int: return hash(self._canonical_spec) - def __eq__(self, other): - # type: (object) -> bool - if isinstance(other, string_types): + def __eq__(self, other: object) -> bool: + if isinstance(other, str): try: other = self.__class__(str(other)) except InvalidSpecifier: @@ -145,57 +135,39 @@ class _IndividualSpecifier(BaseSpecifier): return self._canonical_spec == other._canonical_spec - def __ne__(self, other): - # type: (object) -> bool - if isinstance(other, string_types): - try: - other = self.__class__(str(other)) - except InvalidSpecifier: - return NotImplemented - elif not isinstance(other, self.__class__): - return NotImplemented - - return self._spec != other._spec - - def _get_operator(self, op): - # type: (str) -> CallableOperator - operator_callable = getattr( - self, "_compare_{0}".format(self._operators[op]) - ) # type: CallableOperator + def _get_operator(self, op: str) -> CallableOperator: + operator_callable: CallableOperator = getattr( + self, f"_compare_{self._operators[op]}" + ) return operator_callable - def _coerce_version(self, version): - # type: (UnparsedVersion) -> ParsedVersion + def _coerce_version(self, version: UnparsedVersion) -> ParsedVersion: if not isinstance(version, (LegacyVersion, Version)): version = parse(version) return version @property - def operator(self): - # type: () -> str + def operator(self) -> str: return self._spec[0] @property - def version(self): - # type: () -> str + def version(self) -> str: return self._spec[1] @property - def prereleases(self): - # type: () -> Optional[bool] + def prereleases(self) -> Optional[bool]: return self._prereleases @prereleases.setter - def prereleases(self, value): - # type: (bool) -> None + def prereleases(self, value: bool) -> None: self._prereleases = value - def __contains__(self, item): - # type: (str) -> bool + def __contains__(self, item: str) -> bool: return self.contains(item) - def contains(self, item, prereleases=None): - # type: (UnparsedVersion, Optional[bool]) 
-> bool + def contains( + self, item: UnparsedVersion, prereleases: Optional[bool] = None + ) -> bool: # Determine if prereleases are to be allowed or not. if prereleases is None: @@ -213,11 +185,12 @@ class _IndividualSpecifier(BaseSpecifier): # Actually do the comparison to determine if this item is contained # within this Specifier or not. - operator_callable = self._get_operator(self.operator) # type: CallableOperator + operator_callable: CallableOperator = self._get_operator(self.operator) return operator_callable(normalized_item, self.version) - def filter(self, iterable, prereleases=None): - # type: (Iterable[UnparsedVersion], Optional[bool]) -> Iterable[UnparsedVersion] + def filter( + self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None + ) -> Iterable[VersionTypeVar]: yielded = False found_prereleases = [] @@ -231,7 +204,7 @@ class _IndividualSpecifier(BaseSpecifier): if self.contains(parsed_version, **kw): # If our version is a prerelease, and we were not set to allow - # prereleases, then we'll store it for later incase nothing + # prereleases, then we'll store it for later in case nothing # else matches this specifier. if parsed_version.is_prerelease and not ( prereleases or self.prereleases @@ -276,9 +249,8 @@ class LegacySpecifier(_IndividualSpecifier): ">": "greater_than", } - def __init__(self, spec="", prereleases=None): - # type: (str, Optional[bool]) -> None - super(LegacySpecifier, self).__init__(spec, prereleases) + def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None: + super().__init__(spec, prereleases) warnings.warn( "Creating a LegacyVersion has been deprecated and will be " @@ -286,44 +258,37 @@ class LegacySpecifier(_IndividualSpecifier): DeprecationWarning, ) - def _coerce_version(self, version): - # type: (Union[ParsedVersion, str]) -> LegacyVersion + def _coerce_version(self, version: UnparsedVersion) -> LegacyVersion: if not isinstance(version, LegacyVersion): version = LegacyVersion(str(version)) return version - def _compare_equal(self, prospective, spec): - # type: (LegacyVersion, str) -> bool + def _compare_equal(self, prospective: LegacyVersion, spec: str) -> bool: return prospective == self._coerce_version(spec) - def _compare_not_equal(self, prospective, spec): - # type: (LegacyVersion, str) -> bool + def _compare_not_equal(self, prospective: LegacyVersion, spec: str) -> bool: return prospective != self._coerce_version(spec) - def _compare_less_than_equal(self, prospective, spec): - # type: (LegacyVersion, str) -> bool + def _compare_less_than_equal(self, prospective: LegacyVersion, spec: str) -> bool: return prospective <= self._coerce_version(spec) - def _compare_greater_than_equal(self, prospective, spec): - # type: (LegacyVersion, str) -> bool + def _compare_greater_than_equal( + self, prospective: LegacyVersion, spec: str + ) -> bool: return prospective >= self._coerce_version(spec) - def _compare_less_than(self, prospective, spec): - # type: (LegacyVersion, str) -> bool + def _compare_less_than(self, prospective: LegacyVersion, spec: str) -> bool: return prospective < self._coerce_version(spec) - def _compare_greater_than(self, prospective, spec): - # type: (LegacyVersion, str) -> bool + def _compare_greater_than(self, prospective: LegacyVersion, spec: str) -> bool: return prospective > self._coerce_version(spec) def _require_version_compare( - fn, # type: (Callable[[Specifier, ParsedVersion, str], bool]) -): - # type: (...) 
-> Callable[[Specifier, ParsedVersion, str], bool] + fn: Callable[["Specifier", ParsedVersion, str], bool] +) -> Callable[["Specifier", ParsedVersion, str], bool]: @functools.wraps(fn) - def wrapped(self, prospective, spec): - # type: (Specifier, ParsedVersion, str) -> bool + def wrapped(self: "Specifier", prospective: ParsedVersion, spec: str) -> bool: if not isinstance(prospective, Version): return False return fn(self, prospective, spec) @@ -440,8 +405,7 @@ class Specifier(_IndividualSpecifier): } @_require_version_compare - def _compare_compatible(self, prospective, spec): - # type: (ParsedVersion, str) -> bool + def _compare_compatible(self, prospective: ParsedVersion, spec: str) -> bool: # Compatible releases have an equivalent combination of >= and ==. That # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to @@ -450,15 +414,9 @@ class Specifier(_IndividualSpecifier): # the other specifiers. # We want everything but the last item in the version, but we want to - # ignore post and dev releases and we want to treat the pre-release as - # it's own separate segment. + # ignore suffix segments. prefix = ".".join( - list( - itertools.takewhile( - lambda x: (not x.startswith("post") and not x.startswith("dev")), - _version_split(spec), - ) - )[:-1] + list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1] ) # Add the prefix notation to the end of our string @@ -469,8 +427,7 @@ class Specifier(_IndividualSpecifier): ) @_require_version_compare - def _compare_equal(self, prospective, spec): - # type: (ParsedVersion, str) -> bool + def _compare_equal(self, prospective: ParsedVersion, spec: str) -> bool: # We need special logic to handle prefix matching if spec.endswith(".*"): @@ -510,13 +467,11 @@ class Specifier(_IndividualSpecifier): return prospective == spec_version @_require_version_compare - def _compare_not_equal(self, prospective, spec): - # type: (ParsedVersion, str) -> bool + def _compare_not_equal(self, prospective: ParsedVersion, spec: str) -> bool: return not self._compare_equal(prospective, spec) @_require_version_compare - def _compare_less_than_equal(self, prospective, spec): - # type: (ParsedVersion, str) -> bool + def _compare_less_than_equal(self, prospective: ParsedVersion, spec: str) -> bool: # NB: Local version identifiers are NOT permitted in the version # specifier, so local version labels can be universally removed from @@ -524,8 +479,9 @@ class Specifier(_IndividualSpecifier): return Version(prospective.public) <= Version(spec) @_require_version_compare - def _compare_greater_than_equal(self, prospective, spec): - # type: (ParsedVersion, str) -> bool + def _compare_greater_than_equal( + self, prospective: ParsedVersion, spec: str + ) -> bool: # NB: Local version identifiers are NOT permitted in the version # specifier, so local version labels can be universally removed from @@ -533,8 +489,7 @@ class Specifier(_IndividualSpecifier): return Version(prospective.public) >= Version(spec) @_require_version_compare - def _compare_less_than(self, prospective, spec_str): - # type: (ParsedVersion, str) -> bool + def _compare_less_than(self, prospective: ParsedVersion, spec_str: str) -> bool: # Convert our spec to a Version instance, since we'll want to work with # it as a version. 
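A minimal sketch of the compatible-release ("~=") semantics implemented by the Specifier._compare_compatible hunk above, where ~=2.2 is shorthand for ">=2.2, ==2.*". This assumes the standalone packaging distribution, whose public API mirrors this vendored copy:

    # Sketch only: exercises the "~=" operator described in the
    # _compare_compatible comments; not part of this patch.
    from packaging.specifiers import SpecifierSet

    spec = SpecifierSet("~=2.2")                      # same as ">=2.2, ==2.*"
    print("2.3" in spec)                              # True: satisfies both halves
    print("2.2.1" in spec)                            # True
    print("3.0" in spec)                              # False: fails the ==2.* half
    print(list(spec.filter(["2.1", "2.5", "3.0"])))   # ['2.5']
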
@@ -560,8 +515,7 @@ class Specifier(_IndividualSpecifier): return True @_require_version_compare - def _compare_greater_than(self, prospective, spec_str): - # type: (ParsedVersion, str) -> bool + def _compare_greater_than(self, prospective: ParsedVersion, spec_str: str) -> bool: # Convert our spec to a Version instance, since we'll want to work with # it as a version. @@ -592,13 +546,11 @@ class Specifier(_IndividualSpecifier): # same version in the spec. return True - def _compare_arbitrary(self, prospective, spec): - # type: (Version, str) -> bool + def _compare_arbitrary(self, prospective: Version, spec: str) -> bool: return str(prospective).lower() == str(spec).lower() @property - def prereleases(self): - # type: () -> bool + def prereleases(self) -> bool: # If there is an explicit prereleases set for this, then we'll just # blindly use that. @@ -623,17 +575,15 @@ class Specifier(_IndividualSpecifier): return False @prereleases.setter - def prereleases(self, value): - # type: (bool) -> None + def prereleases(self, value: bool) -> None: self._prereleases = value _prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$") -def _version_split(version): - # type: (str) -> List[str] - result = [] # type: List[str] +def _version_split(version: str) -> List[str]: + result: List[str] = [] for item in version.split("."): match = _prefix_regex.search(item) if match: @@ -643,8 +593,13 @@ def _version_split(version): return result -def _pad_version(left, right): - # type: (List[str], List[str]) -> Tuple[List[str], List[str]] +def _is_not_suffix(segment: str) -> bool: + return not any( + segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post") + ) + + +def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]: left_split, right_split = [], [] # Get the release segment of our versions @@ -663,8 +618,9 @@ def _pad_version(left, right): class SpecifierSet(BaseSpecifier): - def __init__(self, specifiers="", prereleases=None): - # type: (str, Optional[bool]) -> None + def __init__( + self, specifiers: str = "", prereleases: Optional[bool] = None + ) -> None: # Split on , to break each individual specifier into it's own item, and # strip each item to remove leading/trailing whitespace. @@ -672,7 +628,7 @@ class SpecifierSet(BaseSpecifier): # Parsed each individual specifier, attempting first to make it a # Specifier and falling back to a LegacySpecifier. - parsed = set() + parsed: Set[_IndividualSpecifier] = set() for specifier in split_specifiers: try: parsed.add(Specifier(specifier)) @@ -686,27 +642,23 @@ class SpecifierSet(BaseSpecifier): # we accept prereleases or not. 
self._prereleases = prereleases - def __repr__(self): - # type: () -> str + def __repr__(self) -> str: pre = ( - ", prereleases={0!r}".format(self.prereleases) + f", prereleases={self.prereleases!r}" if self._prereleases is not None else "" ) - return "<SpecifierSet({0!r}{1})>".format(str(self), pre) + return f"<SpecifierSet({str(self)!r}{pre})>" - def __str__(self): - # type: () -> str + def __str__(self) -> str: return ",".join(sorted(str(s) for s in self._specs)) - def __hash__(self): - # type: () -> int + def __hash__(self) -> int: return hash(self._specs) - def __and__(self, other): - # type: (Union[SpecifierSet, str]) -> SpecifierSet - if isinstance(other, string_types): + def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet": + if isinstance(other, str): other = SpecifierSet(other) elif not isinstance(other, SpecifierSet): return NotImplemented @@ -728,35 +680,22 @@ class SpecifierSet(BaseSpecifier): return specifier - def __eq__(self, other): - # type: (object) -> bool - if isinstance(other, (string_types, _IndividualSpecifier)): + def __eq__(self, other: object) -> bool: + if isinstance(other, (str, _IndividualSpecifier)): other = SpecifierSet(str(other)) elif not isinstance(other, SpecifierSet): return NotImplemented return self._specs == other._specs - def __ne__(self, other): - # type: (object) -> bool - if isinstance(other, (string_types, _IndividualSpecifier)): - other = SpecifierSet(str(other)) - elif not isinstance(other, SpecifierSet): - return NotImplemented - - return self._specs != other._specs - - def __len__(self): - # type: () -> int + def __len__(self) -> int: return len(self._specs) - def __iter__(self): - # type: () -> Iterator[_IndividualSpecifier] + def __iter__(self) -> Iterator[_IndividualSpecifier]: return iter(self._specs) @property - def prereleases(self): - # type: () -> Optional[bool] + def prereleases(self) -> Optional[bool]: # If we have been given an explicit prerelease modifier, then we'll # pass that through here. @@ -774,16 +713,15 @@ class SpecifierSet(BaseSpecifier): return any(s.prereleases for s in self._specs) @prereleases.setter - def prereleases(self, value): - # type: (bool) -> None + def prereleases(self, value: bool) -> None: self._prereleases = value - def __contains__(self, item): - # type: (Union[ParsedVersion, str]) -> bool + def __contains__(self, item: UnparsedVersion) -> bool: return self.contains(item) - def contains(self, item, prereleases=None): - # type: (Union[ParsedVersion, str], Optional[bool]) -> bool + def contains( + self, item: UnparsedVersion, prereleases: Optional[bool] = None + ) -> bool: # Ensure that our item is a Version or LegacyVersion instance. if not isinstance(item, (LegacyVersion, Version)): @@ -811,11 +749,8 @@ class SpecifierSet(BaseSpecifier): return all(s.contains(item, prereleases=prereleases) for s in self._specs) def filter( - self, - iterable, # type: Iterable[Union[ParsedVersion, str]] - prereleases=None, # type: Optional[bool] - ): - # type: (...) -> Iterable[Union[ParsedVersion, str]] + self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None + ) -> Iterable[VersionTypeVar]: # Determine if we're forcing a prerelease or not, if we're not forcing # one for this particular filter call, then we'll use whatever the @@ -834,8 +769,11 @@ class SpecifierSet(BaseSpecifier): # which will filter out any pre-releases, unless there are no final # releases, and which will filter out LegacyVersion in general. 
else: - filtered = [] # type: List[Union[ParsedVersion, str]] - found_prereleases = [] # type: List[Union[ParsedVersion, str]] + filtered: List[VersionTypeVar] = [] + found_prereleases: List[VersionTypeVar] = [] + + item: UnparsedVersion + parsed_version: Union[Version, LegacyVersion] for item in iterable: # Ensure that we some kind of Version class for this item. diff --git a/env/Lib/site-packages/pip/_vendor/packaging/tags.py b/env/Lib/site-packages/pip/_vendor/packaging/tags.py index d637f1b6993c441ee883d7bceb9cc26612242b06..9a3d25a71c75c975291cf987001ecd6882d6417d 100644 --- a/env/Lib/site-packages/pip/_vendor/packaging/tags.py +++ b/env/Lib/site-packages/pip/_vendor/packaging/tags.py @@ -2,81 +2,44 @@ # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. -from __future__ import absolute_import - -import distutils.util - -try: - from importlib.machinery import EXTENSION_SUFFIXES -except ImportError: # pragma: no cover - import imp - - EXTENSION_SUFFIXES = [x[0] for x in imp.get_suffixes()] - del imp -import collections import logging -import os import platform -import re -import struct import sys import sysconfig -import warnings - -from ._typing import TYPE_CHECKING, cast - -if TYPE_CHECKING: # pragma: no cover - from typing import ( - IO, - Dict, - FrozenSet, - Iterable, - Iterator, - List, - Optional, - Sequence, - Tuple, - Union, - ) - - PythonVersion = Sequence[int] - MacVersion = Tuple[int, int] - GlibcVersion = Tuple[int, int] - +from importlib.machinery import EXTENSION_SUFFIXES +from typing import ( + Dict, + FrozenSet, + Iterable, + Iterator, + List, + Optional, + Sequence, + Tuple, + Union, + cast, +) + +from . import _manylinux, _musllinux logger = logging.getLogger(__name__) -INTERPRETER_SHORT_NAMES = { +PythonVersion = Sequence[int] +MacVersion = Tuple[int, int] + +INTERPRETER_SHORT_NAMES: Dict[str, str] = { "python": "py", # Generic. "cpython": "cp", "pypy": "pp", "ironpython": "ip", "jython": "jy", -} # type: Dict[str, str] +} _32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32 -_LEGACY_MANYLINUX_MAP = { - # CentOS 7 w/ glibc 2.17 (PEP 599) - (2, 17): "manylinux2014", - # CentOS 6 w/ glibc 2.12 (PEP 571) - (2, 12): "manylinux2010", - # CentOS 5 w/ glibc 2.5 (PEP 513) - (2, 5): "manylinux1", -} - -# If glibc ever changes its major version, we need to know what the last -# minor version was, so we can build the complete list of all versions. -# For now, guess what the highest minor version might be, assume it will -# be 50 for testing. Once this actually happens, update the dictionary -# with the actual value. -_LAST_GLIBC_MINOR = collections.defaultdict(lambda: 50) # type: Dict[int, int] -glibcVersion = collections.namedtuple("Version", ["major", "minor"]) - - -class Tag(object): +class Tag: """ A representation of the tag triple for a wheel. 
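The Tag class here models PEP 425's interpreter-abi-platform triple. A short sketch of its behavior, again assuming the standalone packaging distribution matches this vendored module:

    # Sketch only: Tag lowercases its components and renders as
    # "interpreter-abi-platform"; parse_tag expands compressed tag sets.
    from packaging.tags import Tag, parse_tag

    tag = Tag("cp39", "cp39", "manylinux_2_17_x86_64")
    print(tag)                               # cp39-cp39-manylinux_2_17_x86_64

    tags = parse_tag("py2.py3-none-any")     # "."-separated parts multiply out
    print(sorted(str(t) for t in tags))      # ['py2-none-any', 'py3-none-any']
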
@@ -86,8 +49,7 @@ class Tag(object): __slots__ = ["_interpreter", "_abi", "_platform", "_hash"] - def __init__(self, interpreter, abi, platform): - # type: (str, str, str) -> None + def __init__(self, interpreter: str, abi: str, platform: str) -> None: self._interpreter = interpreter.lower() self._abi = abi.lower() self._platform = platform.lower() @@ -99,46 +61,39 @@ class Tag(object): self._hash = hash((self._interpreter, self._abi, self._platform)) @property - def interpreter(self): - # type: () -> str + def interpreter(self) -> str: return self._interpreter @property - def abi(self): - # type: () -> str + def abi(self) -> str: return self._abi @property - def platform(self): - # type: () -> str + def platform(self) -> str: return self._platform - def __eq__(self, other): - # type: (object) -> bool + def __eq__(self, other: object) -> bool: if not isinstance(other, Tag): return NotImplemented return ( - (self.platform == other.platform) - and (self.abi == other.abi) - and (self.interpreter == other.interpreter) + (self._hash == other._hash) # Short-circuit ASAP for perf reasons. + and (self._platform == other._platform) + and (self._abi == other._abi) + and (self._interpreter == other._interpreter) ) - def __hash__(self): - # type: () -> int + def __hash__(self) -> int: return self._hash - def __str__(self): - # type: () -> str - return "{}-{}-{}".format(self._interpreter, self._abi, self._platform) + def __str__(self) -> str: + return f"{self._interpreter}-{self._abi}-{self._platform}" - def __repr__(self): - # type: () -> str - return "<{self} @ {self_id}>".format(self=self, self_id=id(self)) + def __repr__(self) -> str: + return f"<{self} @ {id(self)}>" -def parse_tag(tag): - # type: (str) -> FrozenSet[Tag] +def parse_tag(tag: str) -> FrozenSet[Tag]: """ Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances. @@ -154,24 +109,7 @@ def parse_tag(tag): return frozenset(tags) -def _warn_keyword_parameter(func_name, kwargs): - # type: (str, Dict[str, bool]) -> bool - """ - Backwards-compatibility with Python 2.7 to allow treating 'warn' as keyword-only. - """ - if not kwargs: - return False - elif len(kwargs) > 1 or "warn" not in kwargs: - kwargs.pop("warn", None) - arg = next(iter(kwargs.keys())) - raise TypeError( - "{}() got an unexpected keyword argument {!r}".format(func_name, arg) - ) - return kwargs["warn"] - - -def _get_config_var(name, warn=False): - # type: (str, bool) -> Union[int, str, None] +def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]: value = sysconfig.get_config_var(name) if value is None and warn: logger.debug( @@ -180,13 +118,11 @@ def _get_config_var(name, warn=False): return value -def _normalize_string(string): - # type: (str) -> str +def _normalize_string(string: str) -> str: return string.replace(".", "_").replace("-", "_") -def _abi3_applies(python_version): - # type: (PythonVersion) -> bool +def _abi3_applies(python_version: PythonVersion) -> bool: """ Determine if the Python version supports abi3. @@ -195,8 +131,7 @@ def _abi3_applies(python_version): return len(python_version) > 1 and tuple(python_version) >= (3, 2) -def _cpython_abis(py_version, warn=False): - # type: (PythonVersion, bool) -> List[str] +def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]: py_version = tuple(py_version) # To allow for version comparison. 
abis = [] version = _version_nodot(py_version[:2]) @@ -222,7 +157,7 @@ def _cpython_abis(py_version, warn=False): elif debug: # Debug builds can also load "normal" extension modules. # We can also assume no UCS-4 or pymalloc requirement. - abis.append("cp{version}".format(version=version)) + abis.append(f"cp{version}") abis.insert( 0, "cp{version}{debug}{pymalloc}{ucs4}".format( @@ -233,12 +168,12 @@ def _cpython_abis(py_version, warn=False): def cpython_tags( - python_version=None, # type: Optional[PythonVersion] - abis=None, # type: Optional[Iterable[str]] - platforms=None, # type: Optional[Iterable[str]] - **kwargs # type: bool -): - # type: (...) -> Iterator[Tag] + python_version: Optional[PythonVersion] = None, + abis: Optional[Iterable[str]] = None, + platforms: Optional[Iterable[str]] = None, + *, + warn: bool = False, +) -> Iterator[Tag]: """ Yields the tags for a CPython interpreter. @@ -254,11 +189,10 @@ def cpython_tags( If 'abi3' or 'none' are specified in 'abis' then they will be yielded at their normal position and not at the beginning. """ - warn = _warn_keyword_parameter("cpython_tags", kwargs) if not python_version: python_version = sys.version_info[:2] - interpreter = "cp{}".format(_version_nodot(python_version[:2])) + interpreter = f"cp{_version_nodot(python_version[:2])}" if abis is None: if len(python_version) > 1: @@ -273,15 +207,13 @@ def cpython_tags( except ValueError: pass - platforms = list(platforms or _platform_tags()) + platforms = list(platforms or platform_tags()) for abi in abis: for platform_ in platforms: yield Tag(interpreter, abi, platform_) if _abi3_applies(python_version): - for tag in (Tag(interpreter, "abi3", platform_) for platform_ in platforms): - yield tag - for tag in (Tag(interpreter, "none", platform_) for platform_ in platforms): - yield tag + yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms) + yield from (Tag(interpreter, "none", platform_) for platform_ in platforms) if _abi3_applies(python_version): for minor_version in range(python_version[1] - 1, 1, -1): @@ -292,20 +224,19 @@ def cpython_tags( yield Tag(interpreter, "abi3", platform_) -def _generic_abi(): - # type: () -> Iterator[str] +def _generic_abi() -> Iterator[str]: abi = sysconfig.get_config_var("SOABI") if abi: yield _normalize_string(abi) def generic_tags( - interpreter=None, # type: Optional[str] - abis=None, # type: Optional[Iterable[str]] - platforms=None, # type: Optional[Iterable[str]] - **kwargs # type: bool -): - # type: (...) -> Iterator[Tag] + interpreter: Optional[str] = None, + abis: Optional[Iterable[str]] = None, + platforms: Optional[Iterable[str]] = None, + *, + warn: bool = False, +) -> Iterator[Tag]: """ Yields the tags for a generic interpreter. @@ -314,14 +245,13 @@ def generic_tags( The "none" ABI will be added if it was not explicitly provided. """ - warn = _warn_keyword_parameter("generic_tags", kwargs) if not interpreter: interp_name = interpreter_name() interp_version = interpreter_version(warn=warn) interpreter = "".join([interp_name, interp_version]) if abis is None: abis = _generic_abi() - platforms = list(platforms or _platform_tags()) + platforms = list(platforms or platform_tags()) abis = list(abis) if "none" not in abis: abis.append("none") @@ -330,8 +260,7 @@ def generic_tags( yield Tag(interpreter, abi, platform_) -def _py_interpreter_range(py_version): - # type: (PythonVersion) -> Iterator[str] +def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]: """ Yields Python versions in descending order. 
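The tag generators in this module yield tags from most to least specific, which is what lets an installer take the first matching wheel. Illustrative output for a hypothetical CPython 3.9, assuming the standalone packaging distribution (actual tags depend on the running interpreter):

    # Sketch only: cpython_tags yields interpreter-specific tags first,
    # then the stable-ABI ("abi3") and ABI-less ("none") fallbacks, and
    # finally abi3 tags for older minor versions, in descending order.
    from packaging.tags import cpython_tags

    for tag in cpython_tags(python_version=(3, 9), abis=["cp39"], platforms=["any"]):
        print(tag)
    # cp39-cp39-any
    # cp39-abi3-any
    # cp39-none-any
    # cp38-abi3-any
    # ...down to cp32-abi3-any
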
@@ -339,19 +268,18 @@ def _py_interpreter_range(py_version): all previous versions of that major version. """ if len(py_version) > 1: - yield "py{version}".format(version=_version_nodot(py_version[:2])) - yield "py{major}".format(major=py_version[0]) + yield f"py{_version_nodot(py_version[:2])}" + yield f"py{py_version[0]}" if len(py_version) > 1: for minor in range(py_version[1] - 1, -1, -1): - yield "py{version}".format(version=_version_nodot((py_version[0], minor))) + yield f"py{_version_nodot((py_version[0], minor))}" def compatible_tags( - python_version=None, # type: Optional[PythonVersion] - interpreter=None, # type: Optional[str] - platforms=None, # type: Optional[Iterable[str]] -): - # type: (...) -> Iterator[Tag] + python_version: Optional[PythonVersion] = None, + interpreter: Optional[str] = None, + platforms: Optional[Iterable[str]] = None, +) -> Iterator[Tag]: """ Yields the sequence of tags that are compatible with a specific version of Python. @@ -362,7 +290,7 @@ def compatible_tags( """ if not python_version: python_version = sys.version_info[:2] - platforms = list(platforms or _platform_tags()) + platforms = list(platforms or platform_tags()) for version in _py_interpreter_range(python_version): for platform_ in platforms: yield Tag(version, "none", platform_) @@ -372,8 +300,7 @@ def compatible_tags( yield Tag(version, "none", "any") -def _mac_arch(arch, is_32bit=_32_BIT_INTERPRETER): - # type: (str, bool) -> str +def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str: if not is_32bit: return arch @@ -383,8 +310,7 @@ def _mac_arch(arch, is_32bit=_32_BIT_INTERPRETER): return "i386" -def _mac_binary_formats(version, cpu_arch): - # type: (MacVersion, str) -> List[str] +def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> List[str]: formats = [cpu_arch] if cpu_arch == "x86_64": if version < (10, 4): @@ -416,8 +342,9 @@ def _mac_binary_formats(version, cpu_arch): return formats -def mac_platforms(version=None, arch=None): - # type: (Optional[MacVersion], Optional[str]) -> Iterator[str] +def mac_platforms( + version: Optional[MacVersion] = None, arch: Optional[str] = None +) -> Iterator[str]: """ Yields the platform tags for a macOS system. @@ -426,7 +353,7 @@ def mac_platforms(version=None, arch=None): generate platform tags for. Both parameters default to the appropriate value for the current system. """ - version_str, _, cpu_arch = platform.mac_ver() # type: ignore + version_str, _, cpu_arch = platform.mac_ver() if version is None: version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2]))) else: @@ -487,320 +414,24 @@ def mac_platforms(version=None, arch=None): ) -# From PEP 513, PEP 600 -def _is_manylinux_compatible(name, arch, glibc_version): - # type: (str, str, GlibcVersion) -> bool - sys_glibc = _get_glibc_version() - if sys_glibc < glibc_version: - return False - # Check for presence of _manylinux module. 
- try: - import _manylinux # noqa - except ImportError: - pass - else: - if hasattr(_manylinux, "manylinux_compatible"): - result = _manylinux.manylinux_compatible( - glibc_version[0], glibc_version[1], arch - ) - if result is not None: - return bool(result) - else: - if glibc_version == (2, 5): - if hasattr(_manylinux, "manylinux1_compatible"): - return bool(_manylinux.manylinux1_compatible) - if glibc_version == (2, 12): - if hasattr(_manylinux, "manylinux2010_compatible"): - return bool(_manylinux.manylinux2010_compatible) - if glibc_version == (2, 17): - if hasattr(_manylinux, "manylinux2014_compatible"): - return bool(_manylinux.manylinux2014_compatible) - return True - - -def _glibc_version_string(): - # type: () -> Optional[str] - # Returns glibc version string, or None if not using glibc. - return _glibc_version_string_confstr() or _glibc_version_string_ctypes() - - -def _glibc_version_string_confstr(): - # type: () -> Optional[str] - """ - Primary implementation of glibc_version_string using os.confstr. - """ - # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely - # to be broken or missing. This strategy is used in the standard library - # platform module. - # https://github.com/python/cpython/blob/fcf1d003bf4f0100c9d0921ff3d70e1127ca1b71/Lib/platform.py#L175-L183 - try: - # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17". - version_string = os.confstr( # type: ignore[attr-defined] # noqa: F821 - "CS_GNU_LIBC_VERSION" - ) - assert version_string is not None - _, version = version_string.split() # type: Tuple[str, str] - except (AssertionError, AttributeError, OSError, ValueError): - # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)... - return None - return version - - -def _glibc_version_string_ctypes(): - # type: () -> Optional[str] - """ - Fallback implementation of glibc_version_string using ctypes. - """ - try: - import ctypes - except ImportError: - return None - - # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen - # manpage says, "If filename is NULL, then the returned handle is for the - # main program". This way we can let the linker do the work to figure out - # which libc our process is actually using. - # - # We must also handle the special case where the executable is not a - # dynamically linked executable. This can occur when using musl libc, - # for example. In this situation, dlopen() will error, leading to an - # OSError. Interestingly, at least in the case of musl, there is no - # errno set on the OSError. The single string argument used to construct - # OSError comes from libc itself and is therefore not portable to - # hard code here. In any case, failure to call dlopen() means we - # can proceed, so we bail on our attempt. - try: - # Note: typeshed is wrong here so we are ignoring this line. - process_namespace = ctypes.CDLL(None) # type: ignore - except OSError: - return None - - try: - gnu_get_libc_version = process_namespace.gnu_get_libc_version - except AttributeError: - # Symbol doesn't exist -> therefore, we are not linked to - # glibc. - return None - - # Call gnu_get_libc_version, which returns a string like "2.5" - gnu_get_libc_version.restype = ctypes.c_char_p - version_str = gnu_get_libc_version() # type: str - # py2 / py3 compatibility: - if not isinstance(version_str, str): - version_str = version_str.decode("ascii") - - return version_str - - -def _parse_glibc_version(version_str): - # type: (str) -> Tuple[int, int] - # Parse glibc version. 
- # - # We use a regexp instead of str.split because we want to discard any - # random junk that might come after the minor version -- this might happen - # in patched/forked versions of glibc (e.g. Linaro's version of glibc - # uses version strings like "2.20-2014.11"). See gh-3588. - m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str) - if not m: - warnings.warn( - "Expected glibc version with 2 components major.minor," - " got: %s" % version_str, - RuntimeWarning, - ) - return -1, -1 - return (int(m.group("major")), int(m.group("minor"))) - - -_glibc_version = [] # type: List[Tuple[int, int]] - - -def _get_glibc_version(): - # type: () -> Tuple[int, int] - if _glibc_version: - return _glibc_version[0] - version_str = _glibc_version_string() - if version_str is None: - _glibc_version.append((-1, -1)) - else: - _glibc_version.append(_parse_glibc_version(version_str)) - return _glibc_version[0] - - -# Python does not provide platform information at sufficient granularity to -# identify the architecture of the running executable in some cases, so we -# determine it dynamically by reading the information from the running -# process. This only applies on Linux, which uses the ELF format. -class _ELFFileHeader(object): - # https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header - class _InvalidELFFileHeader(ValueError): - """ - An invalid ELF file header was found. - """ - - ELF_MAGIC_NUMBER = 0x7F454C46 - ELFCLASS32 = 1 - ELFCLASS64 = 2 - ELFDATA2LSB = 1 - ELFDATA2MSB = 2 - EM_386 = 3 - EM_S390 = 22 - EM_ARM = 40 - EM_X86_64 = 62 - EF_ARM_ABIMASK = 0xFF000000 - EF_ARM_ABI_VER5 = 0x05000000 - EF_ARM_ABI_FLOAT_HARD = 0x00000400 - - def __init__(self, file): - # type: (IO[bytes]) -> None - def unpack(fmt): - # type: (str) -> int - try: - (result,) = struct.unpack( - fmt, file.read(struct.calcsize(fmt)) - ) # type: (int, ) - except struct.error: - raise _ELFFileHeader._InvalidELFFileHeader() - return result - - self.e_ident_magic = unpack(">I") - if self.e_ident_magic != self.ELF_MAGIC_NUMBER: - raise _ELFFileHeader._InvalidELFFileHeader() - self.e_ident_class = unpack("B") - if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}: - raise _ELFFileHeader._InvalidELFFileHeader() - self.e_ident_data = unpack("B") - if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}: - raise _ELFFileHeader._InvalidELFFileHeader() - self.e_ident_version = unpack("B") - self.e_ident_osabi = unpack("B") - self.e_ident_abiversion = unpack("B") - self.e_ident_pad = file.read(7) - format_h = "<H" if self.e_ident_data == self.ELFDATA2LSB else ">H" - format_i = "<I" if self.e_ident_data == self.ELFDATA2LSB else ">I" - format_q = "<Q" if self.e_ident_data == self.ELFDATA2LSB else ">Q" - format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q - self.e_type = unpack(format_h) - self.e_machine = unpack(format_h) - self.e_version = unpack(format_i) - self.e_entry = unpack(format_p) - self.e_phoff = unpack(format_p) - self.e_shoff = unpack(format_p) - self.e_flags = unpack(format_i) - self.e_ehsize = unpack(format_h) - self.e_phentsize = unpack(format_h) - self.e_phnum = unpack(format_h) - self.e_shentsize = unpack(format_h) - self.e_shnum = unpack(format_h) - self.e_shstrndx = unpack(format_h) - - -def _get_elf_header(): - # type: () -> Optional[_ELFFileHeader] - try: - with open(sys.executable, "rb") as f: - elf_header = _ELFFileHeader(f) - except (IOError, OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader): - return None - return elf_header - - 
-def _is_linux_armhf(): - # type: () -> bool - # hard-float ABI can be detected from the ELF header of the running - # process - # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf - elf_header = _get_elf_header() - if elf_header is None: - return False - result = elf_header.e_ident_class == elf_header.ELFCLASS32 - result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB - result &= elf_header.e_machine == elf_header.EM_ARM - result &= ( - elf_header.e_flags & elf_header.EF_ARM_ABIMASK - ) == elf_header.EF_ARM_ABI_VER5 - result &= ( - elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD - ) == elf_header.EF_ARM_ABI_FLOAT_HARD - return result - - -def _is_linux_i686(): - # type: () -> bool - elf_header = _get_elf_header() - if elf_header is None: - return False - result = elf_header.e_ident_class == elf_header.ELFCLASS32 - result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB - result &= elf_header.e_machine == elf_header.EM_386 - return result - - -def _have_compatible_manylinux_abi(arch): - # type: (str) -> bool - if arch == "armv7l": - return _is_linux_armhf() - if arch == "i686": - return _is_linux_i686() - return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"} - - -def _manylinux_tags(linux, arch): - # type: (str, str) -> Iterator[str] - # Oldest glibc to be supported regardless of architecture is (2, 17). - too_old_glibc2 = glibcVersion(2, 16) - if arch in {"x86_64", "i686"}: - # On x86/i686 also oldest glibc to be supported is (2, 5). - too_old_glibc2 = glibcVersion(2, 4) - current_glibc = glibcVersion(*_get_glibc_version()) - glibc_max_list = [current_glibc] - # We can assume compatibility across glibc major versions. - # https://sourceware.org/bugzilla/show_bug.cgi?id=24636 - # - # Build a list of maximum glibc versions so that we can - # output the canonical list of all glibc from current_glibc - # down to too_old_glibc2, including all intermediary versions. - for glibc_major in range(current_glibc.major - 1, 1, -1): - glibc_max_list.append(glibcVersion(glibc_major, _LAST_GLIBC_MINOR[glibc_major])) - for glibc_max in glibc_max_list: - if glibc_max.major == too_old_glibc2.major: - min_minor = too_old_glibc2.minor - else: - # For other glibc major versions oldest supported is (x, 0). - min_minor = -1 - for glibc_minor in range(glibc_max.minor, min_minor, -1): - glibc_version = (glibc_max.major, glibc_minor) - tag = "manylinux_{}_{}".format(*glibc_version) - if _is_manylinux_compatible(tag, arch, glibc_version): - yield linux.replace("linux", tag) - # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags. 
- if glibc_version in _LEGACY_MANYLINUX_MAP: - legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version] - if _is_manylinux_compatible(legacy_tag, arch, glibc_version): - yield linux.replace("linux", legacy_tag) - - -def _linux_platforms(is_32bit=_32_BIT_INTERPRETER): - # type: (bool) -> Iterator[str] - linux = _normalize_string(distutils.util.get_platform()) +def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]: + linux = _normalize_string(sysconfig.get_platform()) if is_32bit: if linux == "linux_x86_64": linux = "linux_i686" elif linux == "linux_aarch64": linux = "linux_armv7l" _, arch = linux.split("_", 1) - if _have_compatible_manylinux_abi(arch): - for tag in _manylinux_tags(linux, arch): - yield tag + yield from _manylinux.platform_tags(linux, arch) + yield from _musllinux.platform_tags(arch) yield linux -def _generic_platforms(): - # type: () -> Iterator[str] - yield _normalize_string(distutils.util.get_platform()) +def _generic_platforms() -> Iterator[str]: + yield _normalize_string(sysconfig.get_platform()) -def _platform_tags(): - # type: () -> Iterator[str] +def platform_tags() -> Iterator[str]: """ Provides the platform tags for this installation. """ @@ -812,25 +443,18 @@ def _platform_tags(): return _generic_platforms() -def interpreter_name(): - # type: () -> str +def interpreter_name() -> str: """ Returns the name of the running interpreter. """ - try: - name = sys.implementation.name # type: ignore - except AttributeError: # pragma: no cover - # Python 2.7 compatibility. - name = platform.python_implementation().lower() + name = sys.implementation.name return INTERPRETER_SHORT_NAMES.get(name) or name -def interpreter_version(**kwargs): - # type: (bool) -> str +def interpreter_version(*, warn: bool = False) -> str: """ Returns the version of the running interpreter. """ - warn = _warn_keyword_parameter("interpreter_version", kwargs) version = _get_config_var("py_version_nodot", warn=warn) if version: version = str(version) @@ -839,28 +463,25 @@ def interpreter_version(**kwargs): return version -def _version_nodot(version): - # type: (PythonVersion) -> str +def _version_nodot(version: PythonVersion) -> str: return "".join(map(str, version)) -def sys_tags(**kwargs): - # type: (bool) -> Iterator[Tag] +def sys_tags(*, warn: bool = False) -> Iterator[Tag]: """ Returns the sequence of tag triples for the running interpreter. The order of the sequence corresponds to priority order for the interpreter, from most to least important. """ - warn = _warn_keyword_parameter("sys_tags", kwargs) interp_name = interpreter_name() if interp_name == "cp": - for tag in cpython_tags(warn=warn): - yield tag + yield from cpython_tags(warn=warn) else: - for tag in generic_tags(): - yield tag + yield from generic_tags() - for tag in compatible_tags(): - yield tag + if interp_name == "pp": + yield from compatible_tags(interpreter="pp3") + else: + yield from compatible_tags() diff --git a/env/Lib/site-packages/pip/_vendor/packaging/utils.py b/env/Lib/site-packages/pip/_vendor/packaging/utils.py index 6e8c2a3e5bb3ddccb5663b1799ec07d644adcf7f..bab11b80c60f10a4f3bccb12eb5b17c48a449767 100644 --- a/env/Lib/site-packages/pip/_vendor/packaging/utils.py +++ b/env/Lib/site-packages/pip/_vendor/packaging/utils.py @@ -1,22 +1,15 @@ # This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. 
-from __future__ import absolute_import, division, print_function import re +from typing import FrozenSet, NewType, Tuple, Union, cast -from ._typing import TYPE_CHECKING, cast from .tags import Tag, parse_tag from .version import InvalidVersion, Version -if TYPE_CHECKING: # pragma: no cover - from typing import FrozenSet, NewType, Tuple, Union - - BuildTag = Union[Tuple[()], Tuple[int, str]] - NormalizedName = NewType("NormalizedName", str) -else: - BuildTag = tuple - NormalizedName = str +BuildTag = Union[Tuple[()], Tuple[int, str]] +NormalizedName = NewType("NormalizedName", str) class InvalidWheelFilename(ValueError): @@ -36,74 +29,75 @@ _canonicalize_regex = re.compile(r"[-_.]+") _build_tag_regex = re.compile(r"(\d+)(.*)") -def canonicalize_name(name): - # type: (str) -> NormalizedName +def canonicalize_name(name: str) -> NormalizedName: # This is taken from PEP 503. value = _canonicalize_regex.sub("-", name).lower() return cast(NormalizedName, value) -def canonicalize_version(version): - # type: (Union[Version, str]) -> Union[Version, str] +def canonicalize_version(version: Union[Version, str]) -> str: """ This is very similar to Version.__str__, but has one subtle difference with the way it handles the release segment. """ - if not isinstance(version, Version): + if isinstance(version, str): try: - version = Version(version) + parsed = Version(version) except InvalidVersion: # Legacy versions cannot be normalized return version + else: + parsed = version parts = [] # Epoch - if version.epoch != 0: - parts.append("{0}!".format(version.epoch)) + if parsed.epoch != 0: + parts.append(f"{parsed.epoch}!") # Release segment # NB: This strips trailing '.0's to normalize - parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in version.release))) + parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in parsed.release))) # Pre-release - if version.pre is not None: - parts.append("".join(str(x) for x in version.pre)) + if parsed.pre is not None: + parts.append("".join(str(x) for x in parsed.pre)) # Post-release - if version.post is not None: - parts.append(".post{0}".format(version.post)) + if parsed.post is not None: + parts.append(f".post{parsed.post}") # Development release - if version.dev is not None: - parts.append(".dev{0}".format(version.dev)) + if parsed.dev is not None: + parts.append(f".dev{parsed.dev}") # Local version segment - if version.local is not None: - parts.append("+{0}".format(version.local)) + if parsed.local is not None: + parts.append(f"+{parsed.local}") return "".join(parts) -def parse_wheel_filename(filename): - # type: (str) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]] +def parse_wheel_filename( + filename: str, +) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]: if not filename.endswith(".whl"): raise InvalidWheelFilename( - "Invalid wheel filename (extension must be '.whl'): {0}".format(filename) + f"Invalid wheel filename (extension must be '.whl'): {filename}" ) filename = filename[:-4] dashes = filename.count("-") if dashes not in (4, 5): raise InvalidWheelFilename( - "Invalid wheel filename (wrong number of parts): {0}".format(filename) + f"Invalid wheel filename (wrong number of parts): {filename}" ) parts = filename.split("-", dashes - 2) name_part = parts[0] # See PEP 427 for the rules on escaping the project name if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None: - raise InvalidWheelFilename("Invalid project name: {0}".format(filename)) + raise InvalidWheelFilename(f"Invalid project name: 
{filename}") name = canonicalize_name(name_part) version = Version(parts[1]) if dashes == 5: @@ -111,7 +105,7 @@ def parse_wheel_filename(filename): build_match = _build_tag_regex.match(build_part) if build_match is None: raise InvalidWheelFilename( - "Invalid build number: {0} in '{1}'".format(build_part, filename) + f"Invalid build number: {build_part} in '{filename}'" ) build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2))) else: @@ -120,18 +114,22 @@ def parse_wheel_filename(filename): return (name, version, build, tags) -def parse_sdist_filename(filename): - # type: (str) -> Tuple[NormalizedName, Version] - if not filename.endswith(".tar.gz"): +def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]: + if filename.endswith(".tar.gz"): + file_stem = filename[: -len(".tar.gz")] + elif filename.endswith(".zip"): + file_stem = filename[: -len(".zip")] + else: raise InvalidSdistFilename( - "Invalid sdist filename (extension must be '.tar.gz'): {0}".format(filename) + f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):" + f" {filename}" ) # We are requiring a PEP 440 version, which cannot contain dashes, # so we split on the last dash. - name_part, sep, version_part = filename[:-7].rpartition("-") + name_part, sep, version_part = file_stem.rpartition("-") if not sep: - raise InvalidSdistFilename("Invalid sdist filename: {0}".format(filename)) + raise InvalidSdistFilename(f"Invalid sdist filename: {filename}") name = canonicalize_name(name_part) version = Version(version_part) diff --git a/env/Lib/site-packages/pip/_vendor/packaging/version.py b/env/Lib/site-packages/pip/_vendor/packaging/version.py index 517d91f24859e7f18f4f7fbd6d6cf2e56d97a829..de9a09a4ed3b078b37e7490a6686f660ae935aca 100644 --- a/env/Lib/site-packages/pip/_vendor/packaging/version.py +++ b/env/Lib/site-packages/pip/_vendor/packaging/version.py @@ -1,53 +1,45 @@ # This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. 
-from __future__ import absolute_import, division, print_function import collections import itertools import re import warnings +from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union -from ._structures import Infinity, NegativeInfinity -from ._typing import TYPE_CHECKING - -if TYPE_CHECKING: # pragma: no cover - from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union - - from ._structures import InfinityType, NegativeInfinityType - - InfiniteTypes = Union[InfinityType, NegativeInfinityType] - PrePostDevType = Union[InfiniteTypes, Tuple[str, int]] - SubLocalType = Union[InfiniteTypes, int, str] - LocalType = Union[ - NegativeInfinityType, - Tuple[ - Union[ - SubLocalType, - Tuple[SubLocalType, str], - Tuple[NegativeInfinityType, SubLocalType], - ], - ..., - ], - ] - CmpKey = Tuple[ - int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType - ] - LegacyCmpKey = Tuple[int, Tuple[str, ...]] - VersionComparisonMethod = Callable[ - [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool - ] +from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType __all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"] +InfiniteTypes = Union[InfinityType, NegativeInfinityType] +PrePostDevType = Union[InfiniteTypes, Tuple[str, int]] +SubLocalType = Union[InfiniteTypes, int, str] +LocalType = Union[ + NegativeInfinityType, + Tuple[ + Union[ + SubLocalType, + Tuple[SubLocalType, str], + Tuple[NegativeInfinityType, SubLocalType], + ], + ..., + ], +] +CmpKey = Tuple[ + int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType +] +LegacyCmpKey = Tuple[int, Tuple[str, ...]] +VersionComparisonMethod = Callable[ + [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool +] _Version = collections.namedtuple( "_Version", ["epoch", "release", "dev", "pre", "post", "local"] ) -def parse(version): - # type: (str) -> Union[LegacyVersion, Version] +def parse(version: str) -> Union["LegacyVersion", "Version"]: """ Parse the given version string and return either a :class:`Version` object or a :class:`LegacyVersion` object depending on if the given version is @@ -65,53 +57,46 @@ class InvalidVersion(ValueError): """ -class _BaseVersion(object): - _key = None # type: Union[CmpKey, LegacyCmpKey] +class _BaseVersion: + _key: Union[CmpKey, LegacyCmpKey] - def __hash__(self): - # type: () -> int + def __hash__(self) -> int: return hash(self._key) # Please keep the duplicated `isinstance` check # in the six comparisons hereunder # unless you find a way to avoid adding overhead function calls. 
- def __lt__(self, other): - # type: (_BaseVersion) -> bool + def __lt__(self, other: "_BaseVersion") -> bool: if not isinstance(other, _BaseVersion): return NotImplemented return self._key < other._key - def __le__(self, other): - # type: (_BaseVersion) -> bool + def __le__(self, other: "_BaseVersion") -> bool: if not isinstance(other, _BaseVersion): return NotImplemented return self._key <= other._key - def __eq__(self, other): - # type: (object) -> bool + def __eq__(self, other: object) -> bool: if not isinstance(other, _BaseVersion): return NotImplemented return self._key == other._key - def __ge__(self, other): - # type: (_BaseVersion) -> bool + def __ge__(self, other: "_BaseVersion") -> bool: if not isinstance(other, _BaseVersion): return NotImplemented return self._key >= other._key - def __gt__(self, other): - # type: (_BaseVersion) -> bool + def __gt__(self, other: "_BaseVersion") -> bool: if not isinstance(other, _BaseVersion): return NotImplemented return self._key > other._key - def __ne__(self, other): - # type: (object) -> bool + def __ne__(self, other: object) -> bool: if not isinstance(other, _BaseVersion): return NotImplemented @@ -119,8 +104,7 @@ class _BaseVersion(object): class LegacyVersion(_BaseVersion): - def __init__(self, version): - # type: (str) -> None + def __init__(self, version: str) -> None: self._version = str(version) self._key = _legacy_cmpkey(self._version) @@ -130,67 +114,54 @@ class LegacyVersion(_BaseVersion): DeprecationWarning, ) - def __str__(self): - # type: () -> str + def __str__(self) -> str: return self._version - def __repr__(self): - # type: () -> str - return "<LegacyVersion({0})>".format(repr(str(self))) + def __repr__(self) -> str: + return f"<LegacyVersion('{self}')>" @property - def public(self): - # type: () -> str + def public(self) -> str: return self._version @property - def base_version(self): - # type: () -> str + def base_version(self) -> str: return self._version @property - def epoch(self): - # type: () -> int + def epoch(self) -> int: return -1 @property - def release(self): - # type: () -> None + def release(self) -> None: return None @property - def pre(self): - # type: () -> None + def pre(self) -> None: return None @property - def post(self): - # type: () -> None + def post(self) -> None: return None @property - def dev(self): - # type: () -> None + def dev(self) -> None: return None @property - def local(self): - # type: () -> None + def local(self) -> None: return None @property - def is_prerelease(self): - # type: () -> bool + def is_prerelease(self) -> bool: return False @property - def is_postrelease(self): - # type: () -> bool + def is_postrelease(self) -> bool: return False @property - def is_devrelease(self): - # type: () -> bool + def is_devrelease(self) -> bool: return False @@ -205,8 +176,7 @@ _legacy_version_replacement_map = { } -def _parse_version_parts(s): - # type: (str) -> Iterator[str] +def _parse_version_parts(s: str) -> Iterator[str]: for part in _legacy_version_component_re.split(s): part = _legacy_version_replacement_map.get(part, part) @@ -223,8 +193,7 @@ def _parse_version_parts(s): yield "*final" -def _legacy_cmpkey(version): - # type: (str) -> LegacyCmpKey +def _legacy_cmpkey(version: str) -> LegacyCmpKey: # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch # greater than or equal to 0. 
This will effectively put the LegacyVersion, @@ -234,7 +203,7 @@ def _legacy_cmpkey(version): # This scheme is taken from pkg_resources.parse_version setuptools prior to # it's adoption of the packaging library. - parts = [] # type: List[str] + parts: List[str] = [] for part in _parse_version_parts(version.lower()): if part.startswith("*"): # remove "-" before a prerelease tag @@ -289,13 +258,12 @@ class Version(_BaseVersion): _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE) - def __init__(self, version): - # type: (str) -> None + def __init__(self, version: str) -> None: # Validate the version and parse it into pieces match = self._regex.search(version) if not match: - raise InvalidVersion("Invalid version: '{0}'".format(version)) + raise InvalidVersion(f"Invalid version: '{version}'") # Store the parsed out pieces of the version self._version = _Version( @@ -319,17 +287,15 @@ class Version(_BaseVersion): self._version.local, ) - def __repr__(self): - # type: () -> str - return "<Version({0})>".format(repr(str(self))) + def __repr__(self) -> str: + return f"<Version('{self}')>" - def __str__(self): - # type: () -> str + def __str__(self) -> str: parts = [] # Epoch if self.epoch != 0: - parts.append("{0}!".format(self.epoch)) + parts.append(f"{self.epoch}!") # Release segment parts.append(".".join(str(x) for x in self.release)) @@ -340,67 +306,59 @@ class Version(_BaseVersion): # Post-release if self.post is not None: - parts.append(".post{0}".format(self.post)) + parts.append(f".post{self.post}") # Development release if self.dev is not None: - parts.append(".dev{0}".format(self.dev)) + parts.append(f".dev{self.dev}") # Local version segment if self.local is not None: - parts.append("+{0}".format(self.local)) + parts.append(f"+{self.local}") return "".join(parts) @property - def epoch(self): - # type: () -> int - _epoch = self._version.epoch # type: int + def epoch(self) -> int: + _epoch: int = self._version.epoch return _epoch @property - def release(self): - # type: () -> Tuple[int, ...] - _release = self._version.release # type: Tuple[int, ...] + def release(self) -> Tuple[int, ...]: + _release: Tuple[int, ...] 
= self._version.release return _release @property - def pre(self): - # type: () -> Optional[Tuple[str, int]] - _pre = self._version.pre # type: Optional[Tuple[str, int]] + def pre(self) -> Optional[Tuple[str, int]]: + _pre: Optional[Tuple[str, int]] = self._version.pre return _pre @property - def post(self): - # type: () -> Optional[Tuple[str, int]] + def post(self) -> Optional[int]: return self._version.post[1] if self._version.post else None @property - def dev(self): - # type: () -> Optional[Tuple[str, int]] + def dev(self) -> Optional[int]: return self._version.dev[1] if self._version.dev else None @property - def local(self): - # type: () -> Optional[str] + def local(self) -> Optional[str]: if self._version.local: return ".".join(str(x) for x in self._version.local) else: return None @property - def public(self): - # type: () -> str + def public(self) -> str: return str(self).split("+", 1)[0] @property - def base_version(self): - # type: () -> str + def base_version(self) -> str: parts = [] # Epoch if self.epoch != 0: - parts.append("{0}!".format(self.epoch)) + parts.append(f"{self.epoch}!") # Release segment parts.append(".".join(str(x) for x in self.release)) @@ -408,41 +366,33 @@ class Version(_BaseVersion): return "".join(parts) @property - def is_prerelease(self): - # type: () -> bool + def is_prerelease(self) -> bool: return self.dev is not None or self.pre is not None @property - def is_postrelease(self): - # type: () -> bool + def is_postrelease(self) -> bool: return self.post is not None @property - def is_devrelease(self): - # type: () -> bool + def is_devrelease(self) -> bool: return self.dev is not None @property - def major(self): - # type: () -> int + def major(self) -> int: return self.release[0] if len(self.release) >= 1 else 0 @property - def minor(self): - # type: () -> int + def minor(self) -> int: return self.release[1] if len(self.release) >= 2 else 0 @property - def micro(self): - # type: () -> int + def micro(self) -> int: return self.release[2] if len(self.release) >= 3 else 0 def _parse_letter_version( - letter, # type: str - number, # type: Union[str, bytes, SupportsInt] -): - # type: (...) -> Optional[Tuple[str, int]] + letter: str, number: Union[str, bytes, SupportsInt] +) -> Optional[Tuple[str, int]]: if letter: # We consider there to be an implicit 0 in a pre-release if there is @@ -479,8 +429,7 @@ def _parse_letter_version( _local_version_separators = re.compile(r"[\._-]") -def _parse_local_version(local): - # type: (str) -> Optional[LocalType] +def _parse_local_version(local: str) -> Optional[LocalType]: """ Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve"). """ @@ -493,14 +442,13 @@ def _parse_local_version(local): def _cmpkey( - epoch, # type: int - release, # type: Tuple[int, ...] - pre, # type: Optional[Tuple[str, int]] - post, # type: Optional[Tuple[str, int]] - dev, # type: Optional[Tuple[str, int]] - local, # type: Optional[Tuple[SubLocalType]] -): - # type: (...) -> CmpKey + epoch: int, + release: Tuple[int, ...], + pre: Optional[Tuple[str, int]], + post: Optional[Tuple[str, int]], + dev: Optional[Tuple[str, int]], + local: Optional[Tuple[SubLocalType]], +) -> CmpKey: # When we compare a release version, we want to compare it with all of the # trailing zeros removed. So we'll use a reverse the list, drop all the now @@ -516,7 +464,7 @@ def _cmpkey( # if there is not a pre or a post segment. If we have one of those then # the normal sorting rules will handle this case correctly. 
if pre is None and post is None and dev is not None: - _pre = NegativeInfinity # type: PrePostDevType + _pre: PrePostDevType = NegativeInfinity # Versions without a pre-release (except as noted above) should sort after # those with one. elif pre is None: @@ -526,21 +474,21 @@ def _cmpkey( # Versions without a post segment should sort before those with one. if post is None: - _post = NegativeInfinity # type: PrePostDevType + _post: PrePostDevType = NegativeInfinity else: _post = post # Versions without a development segment should sort after those with one. if dev is None: - _dev = Infinity # type: PrePostDevType + _dev: PrePostDevType = Infinity else: _dev = dev if local is None: # Versions without a local segment should sort before those with one. - _local = NegativeInfinity # type: LocalType + _local: LocalType = NegativeInfinity else: # Versions with a local segment need that segment parsed to implement # the sorting rules in PEP440. diff --git a/env/Lib/site-packages/pip/_vendor/pep517/__init__.py b/env/Lib/site-packages/pip/_vendor/pep517/__init__.py deleted file mode 100644 index 3b07c639c463f6bf1ee00679edbd960184c387e7..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/pep517/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -"""Wrappers to build Python packages using PEP 517 hooks -""" - -__version__ = '0.10.0' - -from .wrappers import * # noqa: F401, F403 diff --git a/env/Lib/site-packages/pip/_vendor/pep517/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/pep517/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 589f580d2e21cc8fce9bd99ae00c77a5604a9f06..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/pep517/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/pep517/__pycache__/build.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/pep517/__pycache__/build.cpython-39.pyc deleted file mode 100644 index 47792844f358eac82058d404fcabf681b685d821..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/pep517/__pycache__/build.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/pep517/__pycache__/check.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/pep517/__pycache__/check.cpython-39.pyc deleted file mode 100644 index 2b8bf48c8712b2a76b7d0cdf337614e9d9df0c53..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/pep517/__pycache__/check.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/pep517/__pycache__/colorlog.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/pep517/__pycache__/colorlog.cpython-39.pyc deleted file mode 100644 index 1eabf2c0aa8cf34f5bc55418a9cc8a6424b94c4e..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/pep517/__pycache__/colorlog.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/pep517/__pycache__/compat.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/pep517/__pycache__/compat.cpython-39.pyc deleted file mode 100644 index dd65d1d5cf54e43bedf76232d6b23291b7750c3a..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/pep517/__pycache__/compat.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/pep517/__pycache__/dirtools.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/pep517/__pycache__/dirtools.cpython-39.pyc deleted file mode 
100644 index 73b1646d7f274b7dcbf0798ac825a436804d8b46..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/pep517/__pycache__/dirtools.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/pep517/__pycache__/envbuild.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/pep517/__pycache__/envbuild.cpython-39.pyc deleted file mode 100644 index 1744d09a53c384ece5d60fe31713465a07fdfbf1..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/pep517/__pycache__/envbuild.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/pep517/__pycache__/meta.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/pep517/__pycache__/meta.cpython-39.pyc deleted file mode 100644 index 3e2ed2c3a3ab0fc6ebb48e6e14542442571237d6..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/pep517/__pycache__/meta.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/pep517/__pycache__/wrappers.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/pep517/__pycache__/wrappers.cpython-39.pyc deleted file mode 100644 index 9257929d62f2b2ea70c917bcd7872f56792a791d..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/pep517/__pycache__/wrappers.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/pep517/build.py b/env/Lib/site-packages/pip/_vendor/pep517/build.py deleted file mode 100644 index f884bcf1097d9992700ff718a18b213bc684c7c5..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/pep517/build.py +++ /dev/null @@ -1,127 +0,0 @@ -"""Build a project using PEP 517 hooks. -""" -import argparse -import logging -import os -from pip._vendor import toml -import shutil - -from .envbuild import BuildEnvironment -from .wrappers import Pep517HookCaller -from .dirtools import tempdir, mkdir_p -from .compat import FileNotFoundError - -log = logging.getLogger(__name__) - - -def validate_system(system): - """ - Ensure build system has the requisite fields. - """ - required = {'requires', 'build-backend'} - if not (required <= set(system)): - message = "Missing required fields: {missing}".format( - missing=required-set(system), - ) - raise ValueError(message) - - -def load_system(source_dir): - """ - Load the build system from a source dir (pyproject.toml). - """ - pyproject = os.path.join(source_dir, 'pyproject.toml') - with open(pyproject) as f: - pyproject_data = toml.load(f) - return pyproject_data['build-system'] - - -def compat_system(source_dir): - """ - Given a source dir, attempt to get a build system backend - and requirements from pyproject.toml. Fallback to - setuptools but only if the file was not found or a build - system was not indicated. 
- """ - try: - system = load_system(source_dir) - except (FileNotFoundError, KeyError): - system = {} - system.setdefault( - 'build-backend', - 'setuptools.build_meta:__legacy__', - ) - system.setdefault('requires', ['setuptools', 'wheel']) - return system - - -def _do_build(hooks, env, dist, dest): - get_requires_name = 'get_requires_for_build_{dist}'.format(**locals()) - get_requires = getattr(hooks, get_requires_name) - reqs = get_requires({}) - log.info('Got build requires: %s', reqs) - - env.pip_install(reqs) - log.info('Installed dynamic build dependencies') - - with tempdir() as td: - log.info('Trying to build %s in %s', dist, td) - build_name = 'build_{dist}'.format(**locals()) - build = getattr(hooks, build_name) - filename = build(td, {}) - source = os.path.join(td, filename) - shutil.move(source, os.path.join(dest, os.path.basename(filename))) - - -def build(source_dir, dist, dest=None, system=None): - system = system or load_system(source_dir) - dest = os.path.join(source_dir, dest or 'dist') - mkdir_p(dest) - - validate_system(system) - hooks = Pep517HookCaller( - source_dir, system['build-backend'], system.get('backend-path') - ) - - with BuildEnvironment() as env: - env.pip_install(system['requires']) - _do_build(hooks, env, dist, dest) - - -parser = argparse.ArgumentParser() -parser.add_argument( - 'source_dir', - help="A directory containing pyproject.toml", -) -parser.add_argument( - '--binary', '-b', - action='store_true', - default=False, -) -parser.add_argument( - '--source', '-s', - action='store_true', - default=False, -) -parser.add_argument( - '--out-dir', '-o', - help="Destination in which to save the builds relative to source dir", -) - - -def main(args): - log.warning('pep517.build is deprecated. ' - 'Consider switching to https://pypi.org/project/build/') - - # determine which dists to build - dists = list(filter(None, ( - 'sdist' if args.source or not args.binary else None, - 'wheel' if args.binary or not args.source else None, - ))) - - for dist in dists: - build(args.source_dir, dist, args.out_dir) - - -if __name__ == '__main__': - main(parser.parse_args()) diff --git a/env/Lib/site-packages/pip/_vendor/pep517/check.py b/env/Lib/site-packages/pip/_vendor/pep517/check.py deleted file mode 100644 index decab8a3423065a32bda1631e1fc47bdac5e6d38..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/pep517/check.py +++ /dev/null @@ -1,206 +0,0 @@ -"""Check a project and backend by attempting to build using PEP 517 hooks. 
-""" -import argparse -import logging -import os -from os.path import isfile, join as pjoin -from pip._vendor.toml import TomlDecodeError, load as toml_load -import shutil -from subprocess import CalledProcessError -import sys -import tarfile -from tempfile import mkdtemp -import zipfile - -from .colorlog import enable_colourful_output -from .envbuild import BuildEnvironment -from .wrappers import Pep517HookCaller - -log = logging.getLogger(__name__) - - -def check_build_sdist(hooks, build_sys_requires): - with BuildEnvironment() as env: - try: - env.pip_install(build_sys_requires) - log.info('Installed static build dependencies') - except CalledProcessError: - log.error('Failed to install static build dependencies') - return False - - try: - reqs = hooks.get_requires_for_build_sdist({}) - log.info('Got build requires: %s', reqs) - except Exception: - log.error('Failure in get_requires_for_build_sdist', exc_info=True) - return False - - try: - env.pip_install(reqs) - log.info('Installed dynamic build dependencies') - except CalledProcessError: - log.error('Failed to install dynamic build dependencies') - return False - - td = mkdtemp() - log.info('Trying to build sdist in %s', td) - try: - try: - filename = hooks.build_sdist(td, {}) - log.info('build_sdist returned %r', filename) - except Exception: - log.info('Failure in build_sdist', exc_info=True) - return False - - if not filename.endswith('.tar.gz'): - log.error( - "Filename %s doesn't have .tar.gz extension", filename) - return False - - path = pjoin(td, filename) - if isfile(path): - log.info("Output file %s exists", path) - else: - log.error("Output file %s does not exist", path) - return False - - if tarfile.is_tarfile(path): - log.info("Output file is a tar file") - else: - log.error("Output file is not a tar file") - return False - - finally: - shutil.rmtree(td) - - return True - - -def check_build_wheel(hooks, build_sys_requires): - with BuildEnvironment() as env: - try: - env.pip_install(build_sys_requires) - log.info('Installed static build dependencies') - except CalledProcessError: - log.error('Failed to install static build dependencies') - return False - - try: - reqs = hooks.get_requires_for_build_wheel({}) - log.info('Got build requires: %s', reqs) - except Exception: - log.error('Failure in get_requires_for_build_sdist', exc_info=True) - return False - - try: - env.pip_install(reqs) - log.info('Installed dynamic build dependencies') - except CalledProcessError: - log.error('Failed to install dynamic build dependencies') - return False - - td = mkdtemp() - log.info('Trying to build wheel in %s', td) - try: - try: - filename = hooks.build_wheel(td, {}) - log.info('build_wheel returned %r', filename) - except Exception: - log.info('Failure in build_wheel', exc_info=True) - return False - - if not filename.endswith('.whl'): - log.error("Filename %s doesn't have .whl extension", filename) - return False - - path = pjoin(td, filename) - if isfile(path): - log.info("Output file %s exists", path) - else: - log.error("Output file %s does not exist", path) - return False - - if zipfile.is_zipfile(path): - log.info("Output file is a zip file") - else: - log.error("Output file is not a zip file") - return False - - finally: - shutil.rmtree(td) - - return True - - -def check(source_dir): - pyproject = pjoin(source_dir, 'pyproject.toml') - if isfile(pyproject): - log.info('Found pyproject.toml') - else: - log.error('Missing pyproject.toml') - return False - - try: - with open(pyproject) as f: - pyproject_data = toml_load(f) - # 
Ensure the mandatory data can be loaded - buildsys = pyproject_data['build-system'] - requires = buildsys['requires'] - backend = buildsys['build-backend'] - backend_path = buildsys.get('backend-path') - log.info('Loaded pyproject.toml') - except (TomlDecodeError, KeyError): - log.error("Invalid pyproject.toml", exc_info=True) - return False - - hooks = Pep517HookCaller(source_dir, backend, backend_path) - - sdist_ok = check_build_sdist(hooks, requires) - wheel_ok = check_build_wheel(hooks, requires) - - if not sdist_ok: - log.warning('Sdist checks failed; scroll up to see') - if not wheel_ok: - log.warning('Wheel checks failed') - - return sdist_ok - - -def main(argv=None): - log.warning('pep517.check is deprecated. ' - 'Consider switching to https://pypi.org/project/build/') - - ap = argparse.ArgumentParser() - ap.add_argument( - 'source_dir', - help="A directory containing pyproject.toml") - args = ap.parse_args(argv) - - enable_colourful_output() - - ok = check(args.source_dir) - - if ok: - print(ansi('Checks passed', 'green')) - else: - print(ansi('Checks failed', 'red')) - sys.exit(1) - - -ansi_codes = { - 'reset': '\x1b[0m', - 'bold': '\x1b[1m', - 'red': '\x1b[31m', - 'green': '\x1b[32m', -} - - -def ansi(s, attr): - if os.name != 'nt' and sys.stdout.isatty(): - return ansi_codes[attr] + str(s) + ansi_codes['reset'] - else: - return str(s) - - -if __name__ == '__main__': - main() diff --git a/env/Lib/site-packages/pip/_vendor/pep517/colorlog.py b/env/Lib/site-packages/pip/_vendor/pep517/colorlog.py deleted file mode 100644 index 69c8a59d3d4e038450aa37ec5b801914b817e675..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/pep517/colorlog.py +++ /dev/null @@ -1,115 +0,0 @@ -"""Nicer log formatting with colours. - -Code copied from Tornado, Apache licensed. -""" -# Copyright 2012 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging -import sys - -try: - import curses -except ImportError: - curses = None - - -def _stderr_supports_color(): - color = False - if curses and hasattr(sys.stderr, 'isatty') and sys.stderr.isatty(): - try: - curses.setupterm() - if curses.tigetnum("colors") > 0: - color = True - except Exception: - pass - return color - - -class LogFormatter(logging.Formatter): - """Log formatter with colour support - """ - DEFAULT_COLORS = { - logging.INFO: 2, # Green - logging.WARNING: 3, # Yellow - logging.ERROR: 1, # Red - logging.CRITICAL: 1, - } - - def __init__(self, color=True, datefmt=None): - r""" - :arg bool color: Enables color support. - :arg string fmt: Log message format. - It will be applied to the attributes dict of log records. The - text between ``%(color)s`` and ``%(end_color)s`` will be colored - depending on the level if color support is on. - :arg dict colors: color mappings from logging level to terminal color - code - :arg string datefmt: Datetime format. - Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``. - .. versionchanged:: 3.2 - Added ``fmt`` and ``datefmt`` arguments. 
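The pep517.check module removed above exercises a backend end to end: install the static requires, query the dynamic ones, build, then sanity-check the artifact (note that the wheel variant logs 'Failure in get_requires_for_build_sdist', an apparent copy-paste slip in the deleted code). The artifact checks reduce to a few predicates; a sketch:

    import os
    import tarfile
    import zipfile

    def looks_like_sdist(path):
        # Mirrors check_build_sdist: .tar.gz extension, file exists, readable tar.
        return path.endswith(".tar.gz") and os.path.isfile(path) and tarfile.is_tarfile(path)

    def looks_like_wheel(path):
        # Mirrors check_build_wheel: .whl extension, file exists, readable zip.
        return path.endswith(".whl") and os.path.isfile(path) and zipfile.is_zipfile(path)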
- """ - logging.Formatter.__init__(self, datefmt=datefmt) - self._colors = {} - if color and _stderr_supports_color(): - # The curses module has some str/bytes confusion in - # python3. Until version 3.2.3, most methods return - # bytes, but only accept strings. In addition, we want to - # output these strings with the logging module, which - # works with unicode strings. The explicit calls to - # unicode() below are harmless in python2 but will do the - # right conversion in python 3. - fg_color = (curses.tigetstr("setaf") or - curses.tigetstr("setf") or "") - if (3, 0) < sys.version_info < (3, 2, 3): - fg_color = str(fg_color, "ascii") - - for levelno, code in self.DEFAULT_COLORS.items(): - self._colors[levelno] = str( - curses.tparm(fg_color, code), "ascii") - self._normal = str(curses.tigetstr("sgr0"), "ascii") - - scr = curses.initscr() - self.termwidth = scr.getmaxyx()[1] - curses.endwin() - else: - self._normal = '' - # Default width is usually 80, but too wide is - # worse than too narrow - self.termwidth = 70 - - def formatMessage(self, record): - mlen = len(record.message) - right_text = '{initial}-{name}'.format(initial=record.levelname[0], - name=record.name) - if mlen + len(right_text) < self.termwidth: - space = ' ' * (self.termwidth - (mlen + len(right_text))) - else: - space = ' ' - - if record.levelno in self._colors: - start_color = self._colors[record.levelno] - end_color = self._normal - else: - start_color = end_color = '' - - return record.message + space + start_color + right_text + end_color - - -def enable_colourful_output(level=logging.INFO): - handler = logging.StreamHandler() - handler.setFormatter(LogFormatter()) - logging.root.addHandler(handler) - logging.root.setLevel(level) diff --git a/env/Lib/site-packages/pip/_vendor/pep517/compat.py b/env/Lib/site-packages/pip/_vendor/pep517/compat.py deleted file mode 100644 index 8432acb7324db7c3f26c764a584012849b6c88bb..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/pep517/compat.py +++ /dev/null @@ -1,34 +0,0 @@ -"""Python 2/3 compatibility""" -import json -import sys - - -# Handle reading and writing JSON in UTF-8, on Python 3 and 2. - -if sys.version_info[0] >= 3: - # Python 3 - def write_json(obj, path, **kwargs): - with open(path, 'w', encoding='utf-8') as f: - json.dump(obj, f, **kwargs) - - def read_json(path): - with open(path, 'r', encoding='utf-8') as f: - return json.load(f) - -else: - # Python 2 - def write_json(obj, path, **kwargs): - with open(path, 'wb') as f: - json.dump(obj, f, encoding='utf-8', **kwargs) - - def read_json(path): - with open(path, 'rb') as f: - return json.load(f) - - -# FileNotFoundError - -try: - FileNotFoundError = FileNotFoundError -except NameError: - FileNotFoundError = IOError diff --git a/env/Lib/site-packages/pip/_vendor/pep517/dirtools.py b/env/Lib/site-packages/pip/_vendor/pep517/dirtools.py deleted file mode 100644 index 58c6ca0c56bcafe43497f4a598977b27cb5e7d23..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/pep517/dirtools.py +++ /dev/null @@ -1,44 +0,0 @@ -import os -import io -import contextlib -import tempfile -import shutil -import errno -import zipfile - - -@contextlib.contextmanager -def tempdir(): - """Create a temporary directory in a context manager.""" - td = tempfile.mkdtemp() - try: - yield td - finally: - shutil.rmtree(td) - - -def mkdir_p(*args, **kwargs): - """Like `mkdir`, but does not raise an exception if the - directory already exists. 
- """ - try: - return os.mkdir(*args, **kwargs) - except OSError as exc: - if exc.errno != errno.EEXIST: - raise - - -def dir_to_zipfile(root): - """Construct an in-memory zip file for a directory.""" - buffer = io.BytesIO() - zip_file = zipfile.ZipFile(buffer, 'w') - for root, dirs, files in os.walk(root): - for path in dirs: - fs_path = os.path.join(root, path) - rel_path = os.path.relpath(fs_path, root) - zip_file.writestr(rel_path + '/', '') - for path in files: - fs_path = os.path.join(root, path) - rel_path = os.path.relpath(fs_path, root) - zip_file.write(fs_path, rel_path) - return zip_file diff --git a/env/Lib/site-packages/pip/_vendor/pep517/envbuild.py b/env/Lib/site-packages/pip/_vendor/pep517/envbuild.py deleted file mode 100644 index 4088dcdb40a81efbedc9bb595c2ce69e05946217..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/pep517/envbuild.py +++ /dev/null @@ -1,167 +0,0 @@ -"""Build wheels/sdists by installing build deps to a temporary environment. -""" - -import os -import logging -from pip._vendor import toml -import shutil -from subprocess import check_call -import sys -from sysconfig import get_paths -from tempfile import mkdtemp - -from .wrappers import Pep517HookCaller, LoggerWrapper - -log = logging.getLogger(__name__) - - -def _load_pyproject(source_dir): - with open(os.path.join(source_dir, 'pyproject.toml')) as f: - pyproject_data = toml.load(f) - buildsys = pyproject_data['build-system'] - return ( - buildsys['requires'], - buildsys['build-backend'], - buildsys.get('backend-path'), - ) - - -class BuildEnvironment(object): - """Context manager to install build deps in a simple temporary environment - - Based on code I wrote for pip, which is MIT licensed. - """ - # Copyright (c) 2008-2016 The pip developers (see AUTHORS.txt file) - # - # Permission is hereby granted, free of charge, to any person obtaining - # a copy of this software and associated documentation files (the - # "Software"), to deal in the Software without restriction, including - # without limitation the rights to use, copy, modify, merge, publish, - # distribute, sublicense, and/or sell copies of the Software, and to - # permit persons to whom the Software is furnished to do so, subject to - # the following conditions: - # - # The above copyright notice and this permission notice shall be - # included in all copies or substantial portions of the Software. - # - # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- - path = None - - def __init__(self, cleanup=True): - self._cleanup = cleanup - - def __enter__(self): - self.path = mkdtemp(prefix='pep517-build-env-') - log.info('Temporary build environment: %s', self.path) - - self.save_path = os.environ.get('PATH', None) - self.save_pythonpath = os.environ.get('PYTHONPATH', None) - - install_scheme = 'nt' if (os.name == 'nt') else 'posix_prefix' - install_dirs = get_paths(install_scheme, vars={ - 'base': self.path, - 'platbase': self.path, - }) - - scripts = install_dirs['scripts'] - if self.save_path: - os.environ['PATH'] = scripts + os.pathsep + self.save_path - else: - os.environ['PATH'] = scripts + os.pathsep + os.defpath - - if install_dirs['purelib'] == install_dirs['platlib']: - lib_dirs = install_dirs['purelib'] - else: - lib_dirs = install_dirs['purelib'] + os.pathsep + \ - install_dirs['platlib'] - if self.save_pythonpath: - os.environ['PYTHONPATH'] = lib_dirs + os.pathsep + \ - self.save_pythonpath - else: - os.environ['PYTHONPATH'] = lib_dirs - - return self - - def pip_install(self, reqs): - """Install dependencies into this env by calling pip in a subprocess""" - if not reqs: - return - log.info('Calling pip to install %s', reqs) - cmd = [ - sys.executable, '-m', 'pip', 'install', '--ignore-installed', - '--prefix', self.path] + list(reqs) - check_call( - cmd, - stdout=LoggerWrapper(log, logging.INFO), - stderr=LoggerWrapper(log, logging.ERROR), - ) - - def __exit__(self, exc_type, exc_val, exc_tb): - needs_cleanup = ( - self._cleanup and - self.path is not None and - os.path.isdir(self.path) - ) - if needs_cleanup: - shutil.rmtree(self.path) - - if self.save_path is None: - os.environ.pop('PATH', None) - else: - os.environ['PATH'] = self.save_path - - if self.save_pythonpath is None: - os.environ.pop('PYTHONPATH', None) - else: - os.environ['PYTHONPATH'] = self.save_pythonpath - - -def build_wheel(source_dir, wheel_dir, config_settings=None): - """Build a wheel from a source directory using PEP 517 hooks. - - :param str source_dir: Source directory containing pyproject.toml - :param str wheel_dir: Target directory to create wheel in - :param dict config_settings: Options to pass to build backend - - This is a blocking function which will run pip in a subprocess to install - build requirements. - """ - if config_settings is None: - config_settings = {} - requires, backend, backend_path = _load_pyproject(source_dir) - hooks = Pep517HookCaller(source_dir, backend, backend_path) - - with BuildEnvironment() as env: - env.pip_install(requires) - reqs = hooks.get_requires_for_build_wheel(config_settings) - env.pip_install(reqs) - return hooks.build_wheel(wheel_dir, config_settings) - - -def build_sdist(source_dir, sdist_dir, config_settings=None): - """Build an sdist from a source directory using PEP 517 hooks. - - :param str source_dir: Source directory containing pyproject.toml - :param str sdist_dir: Target directory to place sdist in - :param dict config_settings: Options to pass to build backend - - This is a blocking function which will run pip in a subprocess to install - build requirements. 
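The core of the BuildEnvironment being deleted here is path arithmetic: dependencies are installed to a throwaway prefix via pip's --prefix, and PATH/PYTHONPATH are pointed at the scripts and library directories sysconfig predicts for that prefix. A sketch of just the directory computation:

    import os
    from sysconfig import get_paths

    def env_dirs(prefix):
        # Mirror BuildEnvironment.__enter__: choose the platform's install
        # scheme, then resolve its scripts and library dirs under `prefix`.
        scheme = "nt" if os.name == "nt" else "posix_prefix"
        dirs = get_paths(scheme, vars={"base": prefix, "platbase": prefix})
        lib_dirs = dirs["purelib"]
        if dirs["platlib"] != dirs["purelib"]:
            lib_dirs += os.pathsep + dirs["platlib"]
        return dirs["scripts"], lib_dirs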
- """ - if config_settings is None: - config_settings = {} - requires, backend, backend_path = _load_pyproject(source_dir) - hooks = Pep517HookCaller(source_dir, backend, backend_path) - - with BuildEnvironment() as env: - env.pip_install(requires) - reqs = hooks.get_requires_for_build_sdist(config_settings) - env.pip_install(reqs) - return hooks.build_sdist(sdist_dir, config_settings) diff --git a/env/Lib/site-packages/pip/_vendor/pep517/in_process/__init__.py b/env/Lib/site-packages/pip/_vendor/pep517/in_process/__init__.py deleted file mode 100644 index c932313b32868c71ce3d86896fffe6d00722b35d..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/pep517/in_process/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -"""This is a subpackage because the directory is on sys.path for _in_process.py - -The subpackage should stay as empty as possible to avoid shadowing modules that -the backend might import. -""" -from os.path import dirname, abspath, join as pjoin -from contextlib import contextmanager - -try: - import importlib.resources as resources - - def _in_proc_script_path(): - return resources.path(__package__, '_in_process.py') -except ImportError: - @contextmanager - def _in_proc_script_path(): - yield pjoin(dirname(abspath(__file__)), '_in_process.py') diff --git a/env/Lib/site-packages/pip/_vendor/pep517/in_process/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/pep517/in_process/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index e2722f2ea3c2184a72df767f0512df0257b1aacd..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/pep517/in_process/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/pep517/in_process/__pycache__/_in_process.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/pep517/in_process/__pycache__/_in_process.cpython-39.pyc deleted file mode 100644 index 3872c9f3274e7d6f9553553705736291542f8262..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/pep517/in_process/__pycache__/_in_process.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/pep517/in_process/_in_process.py b/env/Lib/site-packages/pip/_vendor/pep517/in_process/_in_process.py deleted file mode 100644 index a536b03e6bbea0e63459d2f7b71f027ec6c7a150..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/pep517/in_process/_in_process.py +++ /dev/null @@ -1,280 +0,0 @@ -"""This is invoked in a subprocess to call the build backend hooks. - -It expects: -- Command line args: hook_name, control_dir -- Environment variables: - PEP517_BUILD_BACKEND=entry.point:spec - PEP517_BACKEND_PATH=paths (separated with os.pathsep) -- control_dir/input.json: - - {"kwargs": {...}} - -Results: -- control_dir/output.json - - {"return_val": ...} -""" -from glob import glob -from importlib import import_module -import json -import os -import os.path -from os.path import join as pjoin -import re -import shutil -import sys -import traceback - -# This file is run as a script, and `import compat` is not zip-safe, so we -# include write_json() and read_json() from compat.py. -# -# Handle reading and writing JSON in UTF-8, on Python 3 and 2. 
- -if sys.version_info[0] >= 3: - # Python 3 - def write_json(obj, path, **kwargs): - with open(path, 'w', encoding='utf-8') as f: - json.dump(obj, f, **kwargs) - - def read_json(path): - with open(path, 'r', encoding='utf-8') as f: - return json.load(f) - -else: - # Python 2 - def write_json(obj, path, **kwargs): - with open(path, 'wb') as f: - json.dump(obj, f, encoding='utf-8', **kwargs) - - def read_json(path): - with open(path, 'rb') as f: - return json.load(f) - - -class BackendUnavailable(Exception): - """Raised if we cannot import the backend""" - def __init__(self, traceback): - self.traceback = traceback - - -class BackendInvalid(Exception): - """Raised if the backend is invalid""" - def __init__(self, message): - self.message = message - - -class HookMissing(Exception): - """Raised if a hook is missing and we are not executing the fallback""" - - -def contained_in(filename, directory): - """Test if a file is located within the given directory.""" - filename = os.path.normcase(os.path.abspath(filename)) - directory = os.path.normcase(os.path.abspath(directory)) - return os.path.commonprefix([filename, directory]) == directory - - -def _build_backend(): - """Find and load the build backend""" - # Add in-tree backend directories to the front of sys.path. - backend_path = os.environ.get('PEP517_BACKEND_PATH') - if backend_path: - extra_pathitems = backend_path.split(os.pathsep) - sys.path[:0] = extra_pathitems - - ep = os.environ['PEP517_BUILD_BACKEND'] - mod_path, _, obj_path = ep.partition(':') - try: - obj = import_module(mod_path) - except ImportError: - raise BackendUnavailable(traceback.format_exc()) - - if backend_path: - if not any( - contained_in(obj.__file__, path) - for path in extra_pathitems - ): - raise BackendInvalid("Backend was not loaded from backend-path") - - if obj_path: - for path_part in obj_path.split('.'): - obj = getattr(obj, path_part) - return obj - - -def get_requires_for_build_wheel(config_settings): - """Invoke the optional get_requires_for_build_wheel hook - - Returns [] if the hook is not defined. - """ - backend = _build_backend() - try: - hook = backend.get_requires_for_build_wheel - except AttributeError: - return [] - else: - return hook(config_settings) - - -def prepare_metadata_for_build_wheel( - metadata_directory, config_settings, _allow_fallback): - """Invoke optional prepare_metadata_for_build_wheel - - Implements a fallback by building a wheel if the hook isn't defined, - unless _allow_fallback is False in which case HookMissing is raised. - """ - backend = _build_backend() - try: - hook = backend.prepare_metadata_for_build_wheel - except AttributeError: - if not _allow_fallback: - raise HookMissing() - return _get_wheel_metadata_from_wheel(backend, metadata_directory, - config_settings) - else: - return hook(metadata_directory, config_settings) - - -WHEEL_BUILT_MARKER = 'PEP517_ALREADY_BUILT_WHEEL' - - -def _dist_info_files(whl_zip): - """Identify the .dist-info folder inside a wheel ZipFile.""" - res = [] - for path in whl_zip.namelist(): - m = re.match(r'[^/\\]+-[^/\\]+\.dist-info/', path) - if m: - res.append(path) - if res: - return res - raise Exception("No .dist-info folder found in wheel") - - -def _get_wheel_metadata_from_wheel( - backend, metadata_directory, config_settings): - """Build a wheel and extract the metadata from it. - - Fallback for when the build backend does not - define the 'get_wheel_metadata' hook. 
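_build_backend above resolves the PEP517_BUILD_BACKEND spec in two steps: import everything before the colon as a module, then walk the dotted attribute path after it. Isolated as a sketch:

    from importlib import import_module

    def resolve_backend(spec):
        # e.g. "setuptools.build_meta" or "setuptools.build_meta:__legacy__"
        mod_path, _, obj_path = spec.partition(":")
        obj = import_module(mod_path)
        for part in filter(None, obj_path.split(".")):
            obj = getattr(obj, part)
        return obj

resolve_backend('setuptools.build_meta') returns the module itself; the colon form drills into an attribute of it.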
- """ - from zipfile import ZipFile - whl_basename = backend.build_wheel(metadata_directory, config_settings) - with open(os.path.join(metadata_directory, WHEEL_BUILT_MARKER), 'wb'): - pass # Touch marker file - - whl_file = os.path.join(metadata_directory, whl_basename) - with ZipFile(whl_file) as zipf: - dist_info = _dist_info_files(zipf) - zipf.extractall(path=metadata_directory, members=dist_info) - return dist_info[0].split('/')[0] - - -def _find_already_built_wheel(metadata_directory): - """Check for a wheel already built during the get_wheel_metadata hook. - """ - if not metadata_directory: - return None - metadata_parent = os.path.dirname(metadata_directory) - if not os.path.isfile(pjoin(metadata_parent, WHEEL_BUILT_MARKER)): - return None - - whl_files = glob(os.path.join(metadata_parent, '*.whl')) - if not whl_files: - print('Found wheel built marker, but no .whl files') - return None - if len(whl_files) > 1: - print('Found multiple .whl files; unspecified behaviour. ' - 'Will call build_wheel.') - return None - - # Exactly one .whl file - return whl_files[0] - - -def build_wheel(wheel_directory, config_settings, metadata_directory=None): - """Invoke the mandatory build_wheel hook. - - If a wheel was already built in the - prepare_metadata_for_build_wheel fallback, this - will copy it rather than rebuilding the wheel. - """ - prebuilt_whl = _find_already_built_wheel(metadata_directory) - if prebuilt_whl: - shutil.copy2(prebuilt_whl, wheel_directory) - return os.path.basename(prebuilt_whl) - - return _build_backend().build_wheel(wheel_directory, config_settings, - metadata_directory) - - -def get_requires_for_build_sdist(config_settings): - """Invoke the optional get_requires_for_build_wheel hook - - Returns [] if the hook is not defined. - """ - backend = _build_backend() - try: - hook = backend.get_requires_for_build_sdist - except AttributeError: - return [] - else: - return hook(config_settings) - - -class _DummyException(Exception): - """Nothing should ever raise this exception""" - - -class GotUnsupportedOperation(Exception): - """For internal use when backend raises UnsupportedOperation""" - def __init__(self, traceback): - self.traceback = traceback - - -def build_sdist(sdist_directory, config_settings): - """Invoke the mandatory build_sdist hook.""" - backend = _build_backend() - try: - return backend.build_sdist(sdist_directory, config_settings) - except getattr(backend, 'UnsupportedOperation', _DummyException): - raise GotUnsupportedOperation(traceback.format_exc()) - - -HOOK_NAMES = { - 'get_requires_for_build_wheel', - 'prepare_metadata_for_build_wheel', - 'build_wheel', - 'get_requires_for_build_sdist', - 'build_sdist', -} - - -def main(): - if len(sys.argv) < 3: - sys.exit("Needs args: hook_name, control_dir") - hook_name = sys.argv[1] - control_dir = sys.argv[2] - if hook_name not in HOOK_NAMES: - sys.exit("Unknown hook: %s" % hook_name) - hook = globals()[hook_name] - - hook_input = read_json(pjoin(control_dir, 'input.json')) - - json_out = {'unsupported': False, 'return_val': None} - try: - json_out['return_val'] = hook(**hook_input['kwargs']) - except BackendUnavailable as e: - json_out['no_backend'] = True - json_out['traceback'] = e.traceback - except BackendInvalid as e: - json_out['backend_invalid'] = True - json_out['backend_error'] = e.message - except GotUnsupportedOperation as e: - json_out['unsupported'] = True - json_out['traceback'] = e.traceback - except HookMissing: - json_out['hook_missing'] = True - - write_json(json_out, pjoin(control_dir, 
'output.json'), indent=2) - - -if __name__ == '__main__': - main() diff --git a/env/Lib/site-packages/pip/_vendor/pep517/meta.py b/env/Lib/site-packages/pip/_vendor/pep517/meta.py deleted file mode 100644 index d525de5c6c8791def8dbb2f460027e200c934874..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/pep517/meta.py +++ /dev/null @@ -1,92 +0,0 @@ -"""Build metadata for a project using PEP 517 hooks. -""" -import argparse -import logging -import os -import shutil -import functools - -try: - import importlib.metadata as imp_meta -except ImportError: - import importlib_metadata as imp_meta - -try: - from zipfile import Path -except ImportError: - from zipp import Path - -from .envbuild import BuildEnvironment -from .wrappers import Pep517HookCaller, quiet_subprocess_runner -from .dirtools import tempdir, mkdir_p, dir_to_zipfile -from .build import validate_system, load_system, compat_system - -log = logging.getLogger(__name__) - - -def _prep_meta(hooks, env, dest): - reqs = hooks.get_requires_for_build_wheel({}) - log.info('Got build requires: %s', reqs) - - env.pip_install(reqs) - log.info('Installed dynamic build dependencies') - - with tempdir() as td: - log.info('Trying to build metadata in %s', td) - filename = hooks.prepare_metadata_for_build_wheel(td, {}) - source = os.path.join(td, filename) - shutil.move(source, os.path.join(dest, os.path.basename(filename))) - - -def build(source_dir='.', dest=None, system=None): - system = system or load_system(source_dir) - dest = os.path.join(source_dir, dest or 'dist') - mkdir_p(dest) - validate_system(system) - hooks = Pep517HookCaller( - source_dir, system['build-backend'], system.get('backend-path') - ) - - with hooks.subprocess_runner(quiet_subprocess_runner): - with BuildEnvironment() as env: - env.pip_install(system['requires']) - _prep_meta(hooks, env, dest) - - -def build_as_zip(builder=build): - with tempdir() as out_dir: - builder(dest=out_dir) - return dir_to_zipfile(out_dir) - - -def load(root): - """ - Given a source directory (root) of a package, - return an importlib.metadata.Distribution object - with metadata build from that package. - """ - root = os.path.expanduser(root) - system = compat_system(root) - builder = functools.partial(build, source_dir=root, system=system) - path = Path(build_as_zip(builder)) - return imp_meta.PathDistribution(path) - - -parser = argparse.ArgumentParser() -parser.add_argument( - 'source_dir', - help="A directory containing pyproject.toml", -) -parser.add_argument( - '--out-dir', '-o', - help="Destination in which to save the builds relative to source dir", -) - - -def main(): - args = parser.parse_args() - build(args.source_dir, args.out_dir) - - -if __name__ == '__main__': - main() diff --git a/env/Lib/site-packages/pip/_vendor/pep517/wrappers.py b/env/Lib/site-packages/pip/_vendor/pep517/wrappers.py deleted file mode 100644 index 00974aa8bea31fe1298bdf22e9c49eba2cda2544..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/pep517/wrappers.py +++ /dev/null @@ -1,318 +0,0 @@ -import threading -from contextlib import contextmanager -import os -from os.path import abspath, join as pjoin -import shutil -from subprocess import check_call, check_output, STDOUT -import sys -from tempfile import mkdtemp - -from . 
import compat -from .in_process import _in_proc_script_path - -__all__ = [ - 'BackendUnavailable', - 'BackendInvalid', - 'HookMissing', - 'UnsupportedOperation', - 'default_subprocess_runner', - 'quiet_subprocess_runner', - 'Pep517HookCaller', -] - - -@contextmanager -def tempdir(): - td = mkdtemp() - try: - yield td - finally: - shutil.rmtree(td) - - -class BackendUnavailable(Exception): - """Will be raised if the backend cannot be imported in the hook process.""" - def __init__(self, traceback): - self.traceback = traceback - - -class BackendInvalid(Exception): - """Will be raised if the backend is invalid.""" - def __init__(self, backend_name, backend_path, message): - self.backend_name = backend_name - self.backend_path = backend_path - self.message = message - - -class HookMissing(Exception): - """Will be raised on missing hooks.""" - def __init__(self, hook_name): - super(HookMissing, self).__init__(hook_name) - self.hook_name = hook_name - - -class UnsupportedOperation(Exception): - """May be raised by build_sdist if the backend indicates that it can't.""" - def __init__(self, traceback): - self.traceback = traceback - - -def default_subprocess_runner(cmd, cwd=None, extra_environ=None): - """The default method of calling the wrapper subprocess.""" - env = os.environ.copy() - if extra_environ: - env.update(extra_environ) - - check_call(cmd, cwd=cwd, env=env) - - -def quiet_subprocess_runner(cmd, cwd=None, extra_environ=None): - """A method of calling the wrapper subprocess while suppressing output.""" - env = os.environ.copy() - if extra_environ: - env.update(extra_environ) - - check_output(cmd, cwd=cwd, env=env, stderr=STDOUT) - - -def norm_and_check(source_tree, requested): - """Normalise and check a backend path. - - Ensure that the requested backend path is specified as a relative path, - and resolves to a location under the given source tree. - - Return an absolute version of the requested path. - """ - if os.path.isabs(requested): - raise ValueError("paths must be relative") - - abs_source = os.path.abspath(source_tree) - abs_requested = os.path.normpath(os.path.join(abs_source, requested)) - # We have to use commonprefix for Python 2.7 compatibility. So we - # normalise case to avoid problems because commonprefix is a character - # based comparison :-( - norm_source = os.path.normcase(abs_source) - norm_requested = os.path.normcase(abs_requested) - if os.path.commonprefix([norm_source, norm_requested]) != norm_source: - raise ValueError("paths must be inside source tree") - - return abs_requested - - -class Pep517HookCaller(object): - """A wrapper around a source directory to be built with a PEP 517 backend. - - :param source_dir: The path to the source directory, containing - pyproject.toml. - :param build_backend: The build backend spec, as per PEP 517, from - pyproject.toml. - :param backend_path: The backend path, as per PEP 517, from pyproject.toml. - :param runner: A callable that invokes the wrapper subprocess. - :param python_executable: The Python executable used to invoke the backend - - The 'runner', if provided, must expect the following: - - - cmd: a list of strings representing the command and arguments to - execute, as would be passed to e.g. 'subprocess.check_call'. - - cwd: a string representing the working directory that must be - used for the subprocess. Corresponds to the provided source_dir. - - extra_environ: a dict mapping environment variable names to values - which must be set for the subprocess execution. 
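The runner contract documented just above (cmd, cwd, extra_environ) is what makes the subprocess policy pluggable. A sketch of a drop-in runner with the same shape as default_subprocess_runner, adding a trace line before delegating:

    import os
    import subprocess

    def verbose_runner(cmd, cwd=None, extra_environ=None):
        # Merge the hook's extra environment, then run in the given directory.
        env = os.environ.copy()
        if extra_environ:
            env.update(extra_environ)
        print("pep517 hook: %r (cwd=%r)" % (cmd, cwd))
        subprocess.check_call(cmd, cwd=cwd, env=env)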
- """ - def __init__( - self, - source_dir, - build_backend, - backend_path=None, - runner=None, - python_executable=None, - ): - if runner is None: - runner = default_subprocess_runner - - self.source_dir = abspath(source_dir) - self.build_backend = build_backend - if backend_path: - backend_path = [ - norm_and_check(self.source_dir, p) for p in backend_path - ] - self.backend_path = backend_path - self._subprocess_runner = runner - if not python_executable: - python_executable = sys.executable - self.python_executable = python_executable - - @contextmanager - def subprocess_runner(self, runner): - """A context manager for temporarily overriding the default subprocess - runner. - """ - prev = self._subprocess_runner - self._subprocess_runner = runner - try: - yield - finally: - self._subprocess_runner = prev - - def get_requires_for_build_wheel(self, config_settings=None): - """Identify packages required for building a wheel - - Returns a list of dependency specifications, e.g.:: - - ["wheel >= 0.25", "setuptools"] - - This does not include requirements specified in pyproject.toml. - It returns the result of calling the equivalently named hook in a - subprocess. - """ - return self._call_hook('get_requires_for_build_wheel', { - 'config_settings': config_settings - }) - - def prepare_metadata_for_build_wheel( - self, metadata_directory, config_settings=None, - _allow_fallback=True): - """Prepare a ``*.dist-info`` folder with metadata for this project. - - Returns the name of the newly created folder. - - If the build backend defines a hook with this name, it will be called - in a subprocess. If not, the backend will be asked to build a wheel, - and the dist-info extracted from that (unless _allow_fallback is - False). - """ - return self._call_hook('prepare_metadata_for_build_wheel', { - 'metadata_directory': abspath(metadata_directory), - 'config_settings': config_settings, - '_allow_fallback': _allow_fallback, - }) - - def build_wheel( - self, wheel_directory, config_settings=None, - metadata_directory=None): - """Build a wheel from this project. - - Returns the name of the newly created file. - - In general, this will call the 'build_wheel' hook in the backend. - However, if that was previously called by - 'prepare_metadata_for_build_wheel', and the same metadata_directory is - used, the previously built wheel will be copied to wheel_directory. - """ - if metadata_directory is not None: - metadata_directory = abspath(metadata_directory) - return self._call_hook('build_wheel', { - 'wheel_directory': abspath(wheel_directory), - 'config_settings': config_settings, - 'metadata_directory': metadata_directory, - }) - - def get_requires_for_build_sdist(self, config_settings=None): - """Identify packages required for building a wheel - - Returns a list of dependency specifications, e.g.:: - - ["setuptools >= 26"] - - This does not include requirements specified in pyproject.toml. - It returns the result of calling the equivalently named hook in a - subprocess. - """ - return self._call_hook('get_requires_for_build_sdist', { - 'config_settings': config_settings - }) - - def build_sdist(self, sdist_directory, config_settings=None): - """Build an sdist from this project. - - Returns the name of the newly created file. - - This calls the 'build_sdist' backend hook in a subprocess. 
- """ - return self._call_hook('build_sdist', { - 'sdist_directory': abspath(sdist_directory), - 'config_settings': config_settings, - }) - - def _call_hook(self, hook_name, kwargs): - # On Python 2, pytoml returns Unicode values (which is correct) but the - # environment passed to check_call needs to contain string values. We - # convert here by encoding using ASCII (the backend can only contain - # letters, digits and _, . and : characters, and will be used as a - # Python identifier, so non-ASCII content is wrong on Python 2 in - # any case). - # For backend_path, we use sys.getfilesystemencoding. - if sys.version_info[0] == 2: - build_backend = self.build_backend.encode('ASCII') - else: - build_backend = self.build_backend - extra_environ = {'PEP517_BUILD_BACKEND': build_backend} - - if self.backend_path: - backend_path = os.pathsep.join(self.backend_path) - if sys.version_info[0] == 2: - backend_path = backend_path.encode(sys.getfilesystemencoding()) - extra_environ['PEP517_BACKEND_PATH'] = backend_path - - with tempdir() as td: - hook_input = {'kwargs': kwargs} - compat.write_json(hook_input, pjoin(td, 'input.json'), - indent=2) - - # Run the hook in a subprocess - with _in_proc_script_path() as script: - python = self.python_executable - self._subprocess_runner( - [python, abspath(str(script)), hook_name, td], - cwd=self.source_dir, - extra_environ=extra_environ - ) - - data = compat.read_json(pjoin(td, 'output.json')) - if data.get('unsupported'): - raise UnsupportedOperation(data.get('traceback', '')) - if data.get('no_backend'): - raise BackendUnavailable(data.get('traceback', '')) - if data.get('backend_invalid'): - raise BackendInvalid( - backend_name=self.build_backend, - backend_path=self.backend_path, - message=data.get('backend_error', '') - ) - if data.get('hook_missing'): - raise HookMissing(hook_name) - return data['return_val'] - - -class LoggerWrapper(threading.Thread): - """ - Read messages from a pipe and redirect them - to a logger (see python's logging module). - """ - - def __init__(self, logger, level): - threading.Thread.__init__(self) - self.daemon = True - - self.logger = logger - self.level = level - - # create the pipe and reader - self.fd_read, self.fd_write = os.pipe() - self.reader = os.fdopen(self.fd_read) - - self.start() - - def fileno(self): - return self.fd_write - - @staticmethod - def remove_newline(msg): - return msg[:-1] if msg.endswith(os.linesep) else msg - - def run(self): - for line in self.reader: - self._write(self.remove_newline(line)) - - def _write(self, message): - self.logger.log(self.level, message) diff --git a/env/Lib/site-packages/pip/_vendor/pkg_resources/__init__.py b/env/Lib/site-packages/pip/_vendor/pkg_resources/__init__.py index a457ff27ef0f766ed6f9bd19858ac1ecb2f109cf..ad2794077b0a0299700fd0e8a0336bd1d6e24677 100644 --- a/env/Lib/site-packages/pip/_vendor/pkg_resources/__init__.py +++ b/env/Lib/site-packages/pip/_vendor/pkg_resources/__init__.py @@ -1,4 +1,3 @@ -# coding: utf-8 """ Package resource API -------------------- @@ -13,9 +12,10 @@ The package resource API is designed to work with normal filesystem packages, .egg files, and unpacked .egg files. It can also work in a limited way with .zip files and with custom PEP 302 loaders that support the ``get_data()`` method. -""" -from __future__ import absolute_import +This module is deprecated. Users are directed to :mod:`importlib.resources`, +:mod:`importlib.metadata` and :pypi:`packaging` instead. 
+""" import sys import os @@ -37,10 +37,10 @@ import email.parser import errno import tempfile import textwrap -import itertools import inspect import ntpath import posixpath +import importlib from pkgutil import get_importer try: @@ -54,13 +54,12 @@ try: except NameError: FileExistsError = OSError -from pip._vendor import six -from pip._vendor.six.moves import urllib, map, filter - # capture these to bypass sandboxing from os import utime + try: from os import mkdir, rename, unlink + WRITE_SUPPORT = True except ImportError: # no write support, probably under GAE @@ -71,31 +70,30 @@ from os.path import isdir, split try: import importlib.machinery as importlib_machinery + # access attribute to force import under delayed import mechanisms. importlib_machinery.__name__ except ImportError: importlib_machinery = None -from . import py31compat -from pip._vendor import appdirs +from pip._internal.utils._jaraco_text import ( + yield_lines, + drop_comment, + join_continuation, +) + +from pip._vendor import platformdirs from pip._vendor import packaging + __import__('pip._vendor.packaging.version') __import__('pip._vendor.packaging.specifiers') __import__('pip._vendor.packaging.requirements') __import__('pip._vendor.packaging.markers') +__import__('pip._vendor.packaging.utils') - -__metaclass__ = type - - -if (3, 0) < sys.version_info < (3, 5): +if sys.version_info < (3, 5): raise RuntimeError("Python 3.5 or later is required") -if six.PY2: - # Those builtin exceptions are only defined in Python 3 - PermissionError = None - NotADirectoryError = None - # declare some globals that will be defined later to # satisfy the linters. require = None @@ -117,6 +115,17 @@ _namespace_handlers = None _namespace_packages = None +warnings.warn( + "pkg_resources is deprecated as an API. " + "See https://setuptools.pypa.io/en/latest/pkg_resources.html", + DeprecationWarning, + stacklevel=2 +) + + +_PEP440_FALLBACK = re.compile(r"^v?(?P<safe>(?:[0-9]+!)?[0-9]+(?:\.[0-9]+)*)", re.I) + + class PEP440Warning(RuntimeWarning): """ Used when there is an issue with a version or specifier not complying with @@ -124,11 +133,7 @@ class PEP440Warning(RuntimeWarning): """ -def parse_version(v): - try: - return packaging.version.Version(v) - except packaging.version.InvalidVersion: - return packaging.version.LegacyVersion(v) +parse_version = packaging.version.Version _state_vars = {} @@ -178,10 +183,10 @@ def get_supported_platform(): """Return this platform's maximum compatible version. distutils.util.get_platform() normally reports the minimum version - of Mac OS X that would be required to *use* extensions produced by + of macOS that would be required to *use* extensions produced by distutils. But what we want when checking compatibility is to know the - version of Mac OS X that we are *running*. To allow usage of packages that - explicitly require a newer version of Mac OS X, we must also know the + version of macOS that we are *running*. To allow usage of packages that + explicitly require a newer version of macOS, we must also know the current version of the OS. 
If this condition occurs for any other platform with a version in its @@ -191,60 +196,96 @@ def get_supported_platform(): m = macosVersionString.match(plat) if m is not None and sys.platform == "darwin": try: - plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3)) + plat = 'macosx-%s-%s' % ('.'.join(_macos_vers()[:2]), m.group(3)) except ValueError: - # not Mac OS X + # not macOS pass return plat __all__ = [ # Basic resource access and distribution/entry point discovery - 'require', 'run_script', 'get_provider', 'get_distribution', - 'load_entry_point', 'get_entry_map', 'get_entry_info', + 'require', + 'run_script', + 'get_provider', + 'get_distribution', + 'load_entry_point', + 'get_entry_map', + 'get_entry_info', 'iter_entry_points', - 'resource_string', 'resource_stream', 'resource_filename', - 'resource_listdir', 'resource_exists', 'resource_isdir', - + 'resource_string', + 'resource_stream', + 'resource_filename', + 'resource_listdir', + 'resource_exists', + 'resource_isdir', # Environmental control - 'declare_namespace', 'working_set', 'add_activation_listener', - 'find_distributions', 'set_extraction_path', 'cleanup_resources', + 'declare_namespace', + 'working_set', + 'add_activation_listener', + 'find_distributions', + 'set_extraction_path', + 'cleanup_resources', 'get_default_cache', - # Primary implementation classes - 'Environment', 'WorkingSet', 'ResourceManager', - 'Distribution', 'Requirement', 'EntryPoint', - + 'Environment', + 'WorkingSet', + 'ResourceManager', + 'Distribution', + 'Requirement', + 'EntryPoint', # Exceptions - 'ResolutionError', 'VersionConflict', 'DistributionNotFound', - 'UnknownExtra', 'ExtractionError', - + 'ResolutionError', + 'VersionConflict', + 'DistributionNotFound', + 'UnknownExtra', + 'ExtractionError', # Warnings 'PEP440Warning', - # Parsing functions and string utilities - 'parse_requirements', 'parse_version', 'safe_name', 'safe_version', - 'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections', - 'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker', - + 'parse_requirements', + 'parse_version', + 'safe_name', + 'safe_version', + 'get_platform', + 'compatible_platforms', + 'yield_lines', + 'split_sections', + 'safe_extra', + 'to_filename', + 'invalid_marker', + 'evaluate_marker', # filesystem utilities - 'ensure_directory', 'normalize_path', - + 'ensure_directory', + 'normalize_path', # Distribution "precedence" constants - 'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST', - + 'EGG_DIST', + 'BINARY_DIST', + 'SOURCE_DIST', + 'CHECKOUT_DIST', + 'DEVELOP_DIST', # "Provider" interfaces, implementations, and registration/lookup APIs - 'IMetadataProvider', 'IResourceProvider', 'FileMetadata', - 'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider', - 'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider', - 'register_finder', 'register_namespace_handler', 'register_loader_type', - 'fixup_namespace_packages', 'get_importer', - + 'IMetadataProvider', + 'IResourceProvider', + 'FileMetadata', + 'PathMetadata', + 'EggMetadata', + 'EmptyProvider', + 'empty_provider', + 'NullProvider', + 'EggProvider', + 'DefaultProvider', + 'ZipProvider', + 'register_finder', + 'register_namespace_handler', + 'register_loader_type', + 'fixup_namespace_packages', + 'get_importer', # Warnings 'PkgResourcesDeprecationWarning', - # Deprecated/backward compatibility only - 'run_main', 'AvailableDistributions', + 'run_main', + 'AvailableDistributions', ] @@ -303,8 +344,10 @@ class 
ContextualVersionConflict(VersionConflict): class DistributionNotFound(ResolutionError): """A requested distribution was not found""" - _template = ("The '{self.req}' distribution was not found " - "and is required by {self.requirers_str}") + _template = ( + "The '{self.req}' distribution was not found " + "and is required by {self.requirers_str}" + ) @property def req(self): @@ -364,7 +407,7 @@ def get_provider(moduleOrReq): return _find_adapter(_provider_factories, loader)(module) -def _macosx_vers(_cache=[]): +def _macos_vers(_cache=[]): if not _cache: version = platform.mac_ver()[0] # fallback for MacPorts @@ -380,7 +423,7 @@ def _macosx_vers(_cache=[]): return _cache[0] -def _macosx_arch(machine): +def _macos_arch(machine): return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine) @@ -388,18 +431,19 @@ def get_build_platform(): """Return this platform's string for platform-specific distributions XXX Currently this is the same as ``distutils.util.get_platform()``, but it - needs some hacks for Linux and Mac OS X. + needs some hacks for Linux and macOS. """ from sysconfig import get_platform plat = get_platform() if sys.platform == "darwin" and not plat.startswith('macosx-'): try: - version = _macosx_vers() + version = _macos_vers() machine = os.uname()[4].replace(" ", "_") return "macosx-%d.%d-%s" % ( - int(version[0]), int(version[1]), - _macosx_arch(machine), + int(version[0]), + int(version[1]), + _macos_arch(machine), ) except ValueError: # if someone is running a non-Mac darwin system, this will fall @@ -425,7 +469,7 @@ def compatible_platforms(provided, required): # easy case return True - # Mac OS X special cases + # macOS special cases reqMac = macosVersionString.match(required) if reqMac: provMac = macosVersionString.match(provided) @@ -434,20 +478,23 @@ def compatible_platforms(provided, required): if not provMac: # this is backwards compatibility for packages built before # setuptools 0.6. All packages built after this point will - # use the new macosx designation. + # use the new macOS designation. provDarwin = darwinVersionString.match(provided) if provDarwin: dversion = int(provDarwin.group(1)) macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2)) - if dversion == 7 and macosversion >= "10.3" or \ - dversion == 8 and macosversion >= "10.4": + if ( + dversion == 7 + and macosversion >= "10.3" + or dversion == 8 + and macosversion >= "10.4" + ): return True - # egg isn't macosx or legacy darwin + # egg isn't macOS or legacy darwin return False # are they the same major version and machine type? - if provMac.group(1) != reqMac.group(1) or \ - provMac.group(3) != reqMac.group(3): + if provMac.group(1) != reqMac.group(1) or provMac.group(3) != reqMac.group(3): return False # is the required OS major update >= the provided one? 
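The compatible_platforms hunk above encodes the macOS rule: an egg built for macosx-X.Y-arch is usable on macosx-X.Z-arch when the major version and machine type match and the running minor version Z is at least Y. A standalone sketch of that comparison (the regex is assumed equivalent to pkg_resources' macosVersionString):

    import re

    MACOS = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")

    def mac_compatible(provided, required):
        # provided: platform the egg was built for; required: running platform.
        prov, req = MACOS.match(provided), MACOS.match(required)
        if not (prov and req):
            return provided == required
        return (prov.group(1) == req.group(1)
                and prov.group(3) == req.group(3)
                and int(req.group(2)) >= int(prov.group(2)))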
@@ -475,7 +522,7 @@ run_main = run_script def get_distribution(dist): """Return a current distribution object for a Requirement or string""" - if isinstance(dist, six.string_types): + if isinstance(dist, str): dist = Requirement.parse(dist) if isinstance(dist, Requirement): dist = get_provider(dist) @@ -509,8 +556,8 @@ class IMetadataProvider: def get_metadata_lines(name): """Yield named metadata resource as list of non-blank non-comment lines - Leading and trailing whitespace is stripped from each line, and lines - with ``#`` as the first non-blank character are omitted.""" + Leading and trailing whitespace is stripped from each line, and lines + with ``#`` as the first non-blank character are omitted.""" def metadata_isdir(name): """Is the named metadata a directory? (like ``os.path.isdir()``)""" @@ -558,6 +605,7 @@ class WorkingSet: self.entries = [] self.entry_keys = {} self.by_key = {} + self.normalized_to_canonical_keys = {} self.callbacks = [] if entries is None: @@ -638,6 +686,14 @@ class WorkingSet: is returned. """ dist = self.by_key.get(req.key) + + if dist is None: + canonical_key = self.normalized_to_canonical_keys.get(req.key) + + if canonical_key is not None: + req.key = canonical_key + dist = self.by_key.get(canonical_key) + if dist is not None and dist not in req: # XXX add more info raise VersionConflict(dist, req) @@ -706,14 +762,22 @@ class WorkingSet: return self.by_key[dist.key] = dist + normalized_name = packaging.utils.canonicalize_name(dist.key) + self.normalized_to_canonical_keys[normalized_name] = dist.key if dist.key not in keys: keys.append(dist.key) if dist.key not in keys2: keys2.append(dist.key) self._added_new(dist) - def resolve(self, requirements, env=None, installer=None, - replace_conflicting=False, extras=None): + def resolve( + self, + requirements, + env=None, + installer=None, + replace_conflicting=False, + extras=None, + ): """List all distributions needed to (recursively) meet `requirements` `requirements` must be a sequence of ``Requirement`` objects. 
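The WorkingSet changes above add a second index: normalized_to_canonical_keys, populated in add() via packaging.utils.canonicalize_name, lets find() recover a distribution whose stored key differs from the requirement key only by PEP 503 normalization. The lookup in miniature (a sketch; by_key stands in for the WorkingSet index):

    from packaging.utils import canonicalize_name

    by_key = {"ruamel.yaml": "<dist>"}
    normalized_to_canonical_keys = {canonicalize_name(k): k for k in by_key}

    def find(req_key):
        # Exact key first; then map the normalized name back to the stored key.
        dist = by_key.get(req_key)
        if dist is None:
            canonical_key = normalized_to_canonical_keys.get(req_key)
            if canonical_key is not None:
                dist = by_key.get(canonical_key)
        return dist

    assert find("ruamel-yaml") == "<dist>"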
`env`, @@ -762,33 +826,9 @@ class WorkingSet: if not req_extras.markers_pass(req, extras): continue - dist = best.get(req.key) - if dist is None: - # Find the best distribution and add it to the map - dist = self.by_key.get(req.key) - if dist is None or (dist not in req and replace_conflicting): - ws = self - if env is None: - if dist is None: - env = Environment(self.entries) - else: - # Use an empty environment and workingset to avoid - # any further conflicts with the conflicting - # distribution - env = Environment([]) - ws = WorkingSet([]) - dist = best[req.key] = env.best_match( - req, ws, installer, - replace_conflicting=replace_conflicting - ) - if dist is None: - requirers = required_by.get(req, None) - raise DistributionNotFound(req, requirers) - to_activate.append(dist) - if dist not in req: - # Oops, the "best" so far conflicts with a dependency - dependent_req = required_by[req] - raise VersionConflict(dist, req).with_context(dependent_req) + dist = self._resolve_dist( + req, best, replace_conflicting, env, installer, required_by, to_activate + ) # push the new requirements onto the stack new_requirements = dist.requires(req.extras)[::-1] @@ -804,8 +844,38 @@ class WorkingSet: # return list of distros to activate return to_activate - def find_plugins( - self, plugin_env, full_env=None, installer=None, fallback=True): + def _resolve_dist( + self, req, best, replace_conflicting, env, installer, required_by, to_activate + ): + dist = best.get(req.key) + if dist is None: + # Find the best distribution and add it to the map + dist = self.by_key.get(req.key) + if dist is None or (dist not in req and replace_conflicting): + ws = self + if env is None: + if dist is None: + env = Environment(self.entries) + else: + # Use an empty environment and workingset to avoid + # any further conflicts with the conflicting + # distribution + env = Environment([]) + ws = WorkingSet([]) + dist = best[req.key] = env.best_match( + req, ws, installer, replace_conflicting=replace_conflicting + ) + if dist is None: + requirers = required_by.get(req, None) + raise DistributionNotFound(req, requirers) + to_activate.append(dist) + if dist not in req: + # Oops, the "best" so far conflicts with a dependency + dependent_req = required_by[req] + raise VersionConflict(dist, req).with_context(dependent_req) + return dist + + def find_plugins(self, plugin_env, full_env=None, installer=None, fallback=True): """Find all activatable distributions in `plugin_env` Example usage:: @@ -858,9 +928,7 @@ class WorkingSet: list(map(shadow_set.add, self)) for project_name in plugin_projects: - for dist in plugin_env[project_name]: - req = [dist.as_requirement()] try: @@ -924,15 +992,19 @@ class WorkingSet: def __getstate__(self): return ( - self.entries[:], self.entry_keys.copy(), self.by_key.copy(), - self.callbacks[:] + self.entries[:], + self.entry_keys.copy(), + self.by_key.copy(), + self.normalized_to_canonical_keys.copy(), + self.callbacks[:], ) - def __setstate__(self, e_k_b_c): - entries, keys, by_key, callbacks = e_k_b_c + def __setstate__(self, e_k_b_n_c): + entries, keys, by_key, normalized_to_canonical_keys, callbacks = e_k_b_n_c self.entries = entries[:] self.entry_keys = keys.copy() self.by_key = by_key.copy() + self.normalized_to_canonical_keys = normalized_to_canonical_keys.copy() self.callbacks = callbacks[:] @@ -960,8 +1032,8 @@ class Environment: """Searchable snapshot of distributions on a search path""" def __init__( - self, search_path=None, platform=get_supported_platform(), - python=PY_MAJOR): + self, 
search_path=None, platform=get_supported_platform(), python=PY_MAJOR + ): """Snapshot distributions available on a search path Any distributions found on `search_path` are added to the environment. @@ -1028,16 +1100,14 @@ class Environment: return self._distmap.get(distribution_key, []) def add(self, dist): - """Add `dist` if we ``can_add()`` it and it has not already been added - """ + """Add `dist` if we ``can_add()`` it and it has not already been added""" if self.can_add(dist) and dist.has_version(): dists = self._distmap.setdefault(dist.key, []) if dist not in dists: dists.append(dist) dists.sort(key=operator.attrgetter('hashcmp'), reverse=True) - def best_match( - self, req, working_set, installer=None, replace_conflicting=False): + def best_match(self, req, working_set, installer=None, replace_conflicting=False): """Find distribution best matching `req` and usable on `working_set` This calls the ``find(req)`` method of the `working_set` to see if a @@ -1124,6 +1194,7 @@ class ExtractionError(RuntimeError): class ResourceManager: """Manage resource extraction and packages""" + extraction_path = None def __init__(self): @@ -1135,9 +1206,7 @@ class ResourceManager: def resource_isdir(self, package_or_requirement, resource_name): """Is the named resource an existing directory?""" - return get_provider(package_or_requirement).resource_isdir( - resource_name - ) + return get_provider(package_or_requirement).resource_isdir(resource_name) def resource_filename(self, package_or_requirement, resource_name): """Return a true filesystem path for specified resource""" @@ -1159,9 +1228,7 @@ class ResourceManager: def resource_listdir(self, package_or_requirement, resource_name): """List the contents of the named resource directory""" - return get_provider(package_or_requirement).resource_listdir( - resource_name - ) + return get_provider(package_or_requirement).resource_listdir(resource_name) def extraction_error(self): """Give an error message for problems extracting file(s)""" @@ -1169,7 +1236,8 @@ class ResourceManager: old_exc = sys.exc_info()[1] cache_path = self.extraction_path or get_default_cache() - tmpl = textwrap.dedent(""" + tmpl = textwrap.dedent( + """ Can't extract file(s) to egg cache The following error occurred while trying to extract file(s) @@ -1184,7 +1252,8 @@ class ResourceManager: Perhaps your account does not have write access to this directory? You can change the cache directory by setting the PYTHON_EGG_CACHE environment variable to point to an accessible directory. - """).lstrip() + """ + ).lstrip() err = ExtractionError(tmpl.format(**locals())) err.manager = self err.cache_path = cache_path @@ -1234,12 +1303,13 @@ class ResourceManager: mode = os.stat(path).st_mode if mode & stat.S_IWOTH or mode & stat.S_IWGRP: msg = ( - "%s is writable by group/others and vulnerable to attack " - "when " - "used with get_resource_filename. Consider a more secure " + "Extraction path is writable by group/others " + "and vulnerable to attack when " + "used with get_resource_filename ({path}). " + "Consider a more secure " "location (set with .set_extraction_path or the " - "PYTHON_EGG_CACHE environment variable)." % path - ) + "PYTHON_EGG_CACHE environment variable)." + ).format(**locals()) warnings.warn(msg, UserWarning) def postprocess(self, tempname, filename): @@ -1282,9 +1352,7 @@ class ResourceManager: ``cleanup_resources()``.) 
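The reworded warning above fires whenever the extraction directory is writable by group or others. Stripped of the message formatting, the check is just two permission bits; a self-contained sketch (POSIX paths assumed):

    import os
    import stat

    def extraction_path_is_unsafe(path):
        # Same two-bit test as _warn_unsafe_extraction_path above:
        # writable by group (S_IWGRP) or by others (S_IWOTH).
        mode = os.stat(path).st_mode
        return bool(mode & (stat.S_IWOTH | stat.S_IWGRP))

    print(extraction_path_is_unsafe('/tmp'))   # usually True: /tmp is world-writable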
""" if self.cached_files: - raise ValueError( - "Can't change extraction path, files already extracted" - ) + raise ValueError("Can't change extraction path, files already extracted") self.extraction_path = path @@ -1308,9 +1376,8 @@ def get_default_cache(): or a platform-relevant user cache dir for an app named "Python-Eggs". """ - return ( - os.environ.get('PYTHON_EGG_CACHE') - or appdirs.user_cache_dir(appname='Python-Eggs') + return os.environ.get('PYTHON_EGG_CACHE') or platformdirs.user_cache_dir( + appname='Python-Eggs' ) @@ -1334,6 +1401,38 @@ def safe_version(version): return re.sub('[^A-Za-z0-9.]+', '-', version) +def _forgiving_version(version): + """Fallback when ``safe_version`` is not safe enough + >>> parse_version(_forgiving_version('0.23ubuntu1')) + <Version('0.23.dev0+sanitized.ubuntu1')> + >>> parse_version(_forgiving_version('0.23-')) + <Version('0.23.dev0+sanitized')> + >>> parse_version(_forgiving_version('0.-_')) + <Version('0.dev0+sanitized')> + >>> parse_version(_forgiving_version('42.+?1')) + <Version('42.dev0+sanitized.1')> + >>> parse_version(_forgiving_version('hello world')) + <Version('0.dev0+sanitized.hello.world')> + """ + version = version.replace(' ', '.') + match = _PEP440_FALLBACK.search(version) + if match: + safe = match["safe"] + rest = version[len(safe):] + else: + safe = "0" + rest = version + local = f"sanitized.{_safe_segment(rest)}".strip(".") + return f"{safe}.dev0+{local}" + + +def _safe_segment(segment): + """Convert an arbitrary string into a safe segment""" + segment = re.sub('[^A-Za-z0-9.]+', '-', segment) + segment = re.sub('-[^A-Za-z0-9]+', '-', segment) + return re.sub(r'\.[^A-Za-z0-9]+', '.', segment).strip(".-") + + def safe_extra(extra): """Convert an arbitrary string to a standard 'extra' name @@ -1377,7 +1476,7 @@ def evaluate_marker(text, extra=None): marker = packaging.markers.Marker(text) return marker.evaluate() except packaging.markers.InvalidMarker as e: - raise SyntaxError(e) + raise SyntaxError(e) from e class NullProvider: @@ -1418,8 +1517,6 @@ class NullProvider: return "" path = self._get_metadata_path(name) value = self._get(path) - if six.PY2: - return value try: return value.decode('utf-8') except UnicodeDecodeError as exc: @@ -1449,21 +1546,27 @@ class NullProvider: script = 'scripts/' + script_name if not self.has_metadata(script): raise ResolutionError( - "Script {script!r} not found in metadata at {self.egg_info!r}" - .format(**locals()), + "Script {script!r} not found in metadata at {self.egg_info!r}".format( + **locals() + ), ) script_text = self.get_metadata(script).replace('\r\n', '\n') script_text = script_text.replace('\r', '\n') script_filename = self._fn(self.egg_info, script) namespace['__file__'] = script_filename if os.path.exists(script_filename): - source = open(script_filename).read() + with open(script_filename) as fid: + source = fid.read() code = compile(source, script_filename, 'exec') exec(code, namespace, namespace) else: from linecache import cache + cache[script_filename] = ( - len(script_text), 0, script_text.split('\n'), script_filename + len(script_text), + 0, + script_text.split('\n'), + script_filename, ) script_code = compile(script_text, script_filename, 'exec') exec(script_code, namespace, namespace) @@ -1493,7 +1596,7 @@ class NullProvider: def _validate_resource_path(path): """ Validate the resource paths according to the docs. 
- https://setuptools.readthedocs.io/en/latest/pkg_resources.html#basic-resource-access + https://setuptools.pypa.io/en/latest/pkg_resources.html#basic-resource-access >>> warned = getfixture('recwarn') >>> warnings.simplefilter('always') @@ -1543,9 +1646,9 @@ is not allowed. AttributeError: ... """ invalid = ( - os.path.pardir in path.split(posixpath.sep) or - posixpath.isabs(path) or - ntpath.isabs(path) + os.path.pardir in path.split(posixpath.sep) + or posixpath.isabs(path) + or ntpath.isabs(path) ) if not invalid: return @@ -1558,10 +1661,9 @@ is not allowed. # for compatibility, warn; in future # raise ValueError(msg) - warnings.warn( + issue_warning( msg[:-1] + " and will raise exceptions in a future release.", DeprecationWarning, - stacklevel=4, ) def _get(self, path): @@ -1575,26 +1677,35 @@ is not allowed. register_loader_type(object, NullProvider) +def _parents(path): + """ + yield all parents of path including path + """ + last = None + while path != last: + yield path + last = path + path, _ = os.path.split(path) + + class EggProvider(NullProvider): """Provider based on a virtual filesystem""" def __init__(self, module): - NullProvider.__init__(self, module) + super().__init__(module) self._setup_prefix() def _setup_prefix(self): - # we assume here that our metadata may be nested inside a "basket" - # of multiple eggs; that's why we use module_path instead of .archive - path = self.module_path - old = None - while path != old: - if _is_egg_path(path): - self.egg_name = os.path.basename(path) - self.egg_info = os.path.join(path, 'EGG-INFO') - self.egg_root = path - break - old = path - path, base = os.path.split(path) + # Assume that metadata may be nested inside a "basket" + # of multiple eggs and use module_path instead of .archive. + eggs = filter(_is_egg_path, _parents(self.module_path)) + egg = next(eggs, None) + egg and self._set_egg(egg) + + def _set_egg(self, path): + self.egg_name = os.path.basename(path) + self.egg_info = os.path.join(path, 'EGG-INFO') + self.egg_root = path class DefaultProvider(EggProvider): @@ -1618,7 +1729,10 @@ class DefaultProvider(EggProvider): @classmethod def _register(cls): - loader_names = 'SourceFileLoader', 'SourcelessFileLoader', + loader_names = ( + 'SourceFileLoader', + 'SourcelessFileLoader', + ) for name in loader_names: loader_cls = getattr(importlib_machinery, name, type(None)) register_loader_type(loader_cls, cls) @@ -1678,6 +1792,7 @@ class MemoizedZipManifests(ZipManifests): """ Memoized zipfile manifests. """ + manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime') def load(self, path): @@ -1701,7 +1816,7 @@ class ZipProvider(EggProvider): _zip_manifests = MemoizedZipManifests() def __init__(self, module): - EggProvider.__init__(self, module) + super().__init__(module) self.zip_pre = self.loader.archive + os.sep def _zipinfo_name(self, fspath): @@ -1711,20 +1826,16 @@ class ZipProvider(EggProvider): if fspath == self.loader.archive: return '' if fspath.startswith(self.zip_pre): - return fspath[len(self.zip_pre):] - raise AssertionError( - "%s is not a subpath of %s" % (fspath, self.zip_pre) - ) + return fspath[len(self.zip_pre) :] + raise AssertionError("%s is not a subpath of %s" % (fspath, self.zip_pre)) def _parts(self, zip_path): # Convert a zipfile subpath into an egg-relative path part list. 
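_setup_prefix above re-expresses the old manual while-loop as a pipeline: _parents yields the module path and then each successive parent until os.path.split stops making progress, and the first path matching _is_egg_path (if any) becomes the egg root. The traversal in isolation (the sample path is hypothetical, and a simple .egg suffix test stands in for _is_egg_path):

    import os

    def parents(path):
        # Yield path, then each parent, until os.path.split reaches a
        # fixed point (the filesystem root or a bare drive).
        last = None
        while path != last:
            yield path
            last = path
            path, _ = os.path.split(path)

    candidates = parents('/site-packages/Foo-1.0.egg/foo/bar')
    egg = next((p for p in candidates if p.lower().endswith('.egg')), None)
    print(egg)    # /site-packages/Foo-1.0.egg

Note that after this same diff, _is_egg_path is stricter than a suffix check: it also accepts unpacked eggs and verifies zip eggs with zipfile.is_zipfile.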
# pseudo-fs path fspath = self.zip_pre + zip_path if fspath.startswith(self.egg_root + os.sep): - return fspath[len(self.egg_root) + 1:].split(os.sep) - raise AssertionError( - "%s is not a subpath of %s" % (fspath, self.egg_root) - ) + return fspath[len(self.egg_root) + 1 :].split(os.sep) + raise AssertionError("%s is not a subpath of %s" % (fspath, self.egg_root)) @property def zipinfo(self): @@ -1752,26 +1863,22 @@ class ZipProvider(EggProvider): timestamp = time.mktime(date_time) return timestamp, size - def _extract_resource(self, manager, zip_path): - + # FIXME: 'ZipProvider._extract_resource' is too complex (12) + def _extract_resource(self, manager, zip_path): # noqa: C901 if zip_path in self._index(): for name in self._index()[zip_path]: - last = self._extract_resource( - manager, os.path.join(zip_path, name) - ) + last = self._extract_resource(manager, os.path.join(zip_path, name)) # return the extracted directory name return os.path.dirname(last) timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) if not WRITE_SUPPORT: - raise IOError('"os.rename" and "os.unlink" are not supported ' - 'on this platform') - try: - - real_path = manager.get_cache_path( - self.egg_name, self._parts(zip_path) + raise IOError( + '"os.rename" and "os.unlink" are not supported ' 'on this platform' ) + try: + real_path = manager.get_cache_path(self.egg_name, self._parts(zip_path)) if self._is_current(real_path, zip_path): return real_path @@ -1900,8 +2007,7 @@ class FileMetadata(EmptyProvider): return metadata def _warn_on_replacement(self, metadata): - # Python 2.7 compat for: replacement_char = '�' - replacement_char = b'\xef\xbf\xbd'.decode('utf-8') + replacement_char = '�' if replacement_char in metadata: tmpl = "{self.path} could not be properly decoded in UTF-8" msg = tmpl.format(**locals()) @@ -1991,7 +2097,7 @@ def find_eggs_in_zip(importer, path_item, only=False): dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath) for dist in dists: yield dist - elif subitem.lower().endswith('.dist-info'): + elif subitem.lower().endswith(('.dist-info', '.egg-info')): subpath = os.path.join(path_item, subitem) submeta = EggMetadata(zipimport.zipimporter(subpath)) submeta.egg_info = subpath @@ -2008,58 +2114,21 @@ def find_nothing(importer, path_item, only=False): register_finder(object, find_nothing) -def _by_version_descending(names): - """ - Given a list of filenames, return them in descending order - by version number. 
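The _warn_on_replacement hunk a little above drops the Python 2 workaround: on Python 3 the U+FFFD replacement character can simply be written literally, since the old byte spelling decodes to the very same one-character string:

    assert b'\xef\xbf\xbd'.decode('utf-8') == '\N{REPLACEMENT CHARACTER}' == '\ufffd'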
- - >>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg' - >>> _by_version_descending(names) - ['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar'] - >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg' - >>> _by_version_descending(names) - ['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg'] - >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg' - >>> _by_version_descending(names) - ['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg'] - """ - def _by_version(name): - """ - Parse each component of the filename - """ - name, ext = os.path.splitext(name) - parts = itertools.chain(name.split('-'), [ext]) - return [packaging.version.parse(part) for part in parts] - - return sorted(names, key=_by_version, reverse=True) - - def find_on_path(importer, path_item, only=False): """Yield distributions accessible on a sys.path directory""" path_item = _normalize_cached(path_item) if _is_unpacked_egg(path_item): yield Distribution.from_filename( - path_item, metadata=PathMetadata( - path_item, os.path.join(path_item, 'EGG-INFO') - ) + path_item, + metadata=PathMetadata(path_item, os.path.join(path_item, 'EGG-INFO')), ) return - entries = safe_listdir(path_item) - - # for performance, before sorting by version, - # screen entries for only those that will yield - # distributions - filtered = ( - entry - for entry in entries - if dist_factory(path_item, entry, only) - ) + entries = (os.path.join(path_item, child) for child in safe_listdir(path_item)) # scan for .egg and .egg-info in directory - path_item_entries = _by_version_descending(filtered) - for entry in path_item_entries: + for entry in sorted(entries): fullpath = os.path.join(path_item, entry) factory = dist_factory(path_item, entry, only) for dist in factory(fullpath): @@ -2067,19 +2136,21 @@ def find_on_path(importer, path_item, only=False): def dist_factory(path_item, entry, only): - """ - Return a dist_factory for a path_item and entry - """ + """Return a dist_factory for the given entry.""" lower = entry.lower() - is_meta = any(map(lower.endswith, ('.egg-info', '.dist-info'))) + is_egg_info = lower.endswith('.egg-info') + is_dist_info = lower.endswith('.dist-info') and os.path.isdir( + os.path.join(path_item, entry) + ) + is_meta = is_egg_info or is_dist_info return ( distributions_from_metadata - if is_meta else - find_distributions - if not only and _is_egg_path(entry) else - resolve_egg_link - if not only and lower.endswith('.egg-link') else - NoDists() + if is_meta + else find_distributions + if not only and _is_egg_path(entry) + else resolve_egg_link + if not only and lower.endswith('.egg-link') + else NoDists() ) @@ -2091,10 +2162,9 @@ class NoDists: >>> list(NoDists()('anything')) [] """ + def __bool__(self): return False - if six.PY2: - __nonzero__ = __bool__ def __call__(self, fullpath): return iter(()) @@ -2111,12 +2181,7 @@ def safe_listdir(path): except OSError as e: # Ignore the directory if does not exist, not a directory or # permission denied - ignorable = ( - e.errno in (errno.ENOTDIR, errno.EACCES, errno.ENOENT) - # Python 2 on Windows needs to be handled this way :( - or getattr(e, "winerror", None) == 267 - ) - if not ignorable: + if e.errno not in (errno.ENOTDIR, errno.EACCES, errno.ENOENT): raise return () @@ -2132,7 +2197,10 @@ def distributions_from_metadata(path): metadata = FileMetadata(path) entry = os.path.basename(path) yield Distribution.from_location( - root, entry, metadata, precedence=DEVELOP_DIST, + root, + entry, + metadata, + precedence=DEVELOP_DIST, ) @@ -2154,17 
+2222,16 @@ def resolve_egg_link(path): """ referenced_paths = non_empty_lines(path) resolved_paths = ( - os.path.join(os.path.dirname(path), ref) - for ref in referenced_paths + os.path.join(os.path.dirname(path), ref) for ref in referenced_paths ) dist_groups = map(find_distributions, resolved_paths) return next(dist_groups, ()) -register_finder(pkgutil.ImpImporter, find_on_path) +if hasattr(pkgutil, 'ImpImporter'): + register_finder(pkgutil.ImpImporter, find_on_path) -if hasattr(importlib_machinery, 'FileFinder'): - register_finder(importlib_machinery.FileFinder, find_on_path) +register_finder(importlib_machinery.FileFinder, find_on_path) _declare_state('dict', _namespace_handlers={}) _declare_state('dict', _namespace_packages={}) @@ -2195,10 +2262,16 @@ def _handle_ns(packageName, path_item): if importer is None: return None - # capture warnings due to #1111 - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - loader = importer.find_module(packageName) + # use find_spec (PEP 451) and fall-back to find_module (PEP 302) + try: + spec = importer.find_spec(packageName) + except AttributeError: + # capture warnings due to #1111 + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + loader = importer.find_module(packageName) + else: + loader = spec.loader if spec else None if loader is None: return None @@ -2214,7 +2287,7 @@ def _handle_ns(packageName, path_item): if subpath is not None: path = module.__path__ path.append(subpath) - loader.load_module(packageName) + importlib.import_module(packageName) _rebuild_mod_path(path, packageName, module) return subpath @@ -2256,6 +2329,15 @@ def _rebuild_mod_path(orig_path, package_name, module): def declare_namespace(packageName): """Declare that package 'packageName' is a namespace package""" + msg = ( + f"Deprecated call to `pkg_resources.declare_namespace({packageName!r})`.\n" + "Implementing implicit namespace packages (as specified in PEP 420) " + "is preferred to `pkg_resources.declare_namespace`. " + "See https://setuptools.pypa.io/en/latest/references/" + "keywords.html#keyword-namespace-packages" + ) + warnings.warn(msg, DeprecationWarning, stacklevel=2) + _imp.acquire_lock() try: if packageName in _namespace_packages: @@ -2270,8 +2352,8 @@ def declare_namespace(packageName): __import__(parent) try: path = sys.modules[parent].__path__ - except AttributeError: - raise TypeError("Not a package:", parent) + except AttributeError as e: + raise TypeError("Not a package:", parent) from e # Track what packages are namespaces, so when new path items are added, # they can be updated @@ -2312,11 +2394,11 @@ def file_ns_handler(importer, path_item, packageName, module): return subpath -register_namespace_handler(pkgutil.ImpImporter, file_ns_handler) -register_namespace_handler(zipimport.zipimporter, file_ns_handler) +if hasattr(pkgutil, 'ImpImporter'): + register_namespace_handler(pkgutil.ImpImporter, file_ns_handler) -if hasattr(importlib_machinery, 'FileFinder'): - register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler) +register_namespace_handler(zipimport.zipimporter, file_ns_handler) +register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler) def null_ns_handler(importer, path_item, packageName, module): @@ -2354,16 +2436,23 @@ def _is_egg_path(path): """ Determine if given path appears to be an egg. 
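_handle_ns above now prefers the PEP 451 finder protocol, falling back to the legacy PEP 302 find_module only when the importer predates find_spec. The dispatch pattern, extracted into a stand-alone helper (loader_for is a name invented for this sketch):

    import os
    import pkgutil

    def loader_for(importer, name):
        # Prefer PEP 451 find_spec; fall back to PEP 302 find_module
        # only on importers that do not implement it.
        try:
            spec = importer.find_spec(name)
        except AttributeError:
            return importer.find_module(name)
        return spec.loader if spec else None

    importer = pkgutil.get_importer(os.path.dirname(os.__file__))
    print(loader_for(importer, 'json'))   # a SourceFileLoader on CPython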
""" - return path.lower().endswith('.egg') + return _is_zip_egg(path) or _is_unpacked_egg(path) + + +def _is_zip_egg(path): + return ( + path.lower().endswith('.egg') + and os.path.isfile(path) + and zipfile.is_zipfile(path) + ) def _is_unpacked_egg(path): """ Determine if given path appears to be an unpacked egg. """ - return ( - _is_egg_path(path) and - os.path.isfile(os.path.join(path, 'EGG-INFO', 'PKG-INFO')) + return path.lower().endswith('.egg') and os.path.isfile( + os.path.join(path, 'EGG-INFO', 'PKG-INFO') ) @@ -2375,20 +2464,6 @@ def _set_parent_ns(packageName): setattr(sys.modules[parent], name, sys.modules[packageName]) -def yield_lines(strs): - """Yield non-empty/non-comment lines of a string or sequence""" - if isinstance(strs, six.string_types): - for s in strs.splitlines(): - s = s.strip() - # skip blank lines/comments - if s and not s.startswith('#'): - yield s - else: - for ss in strs: - for s in yield_lines(ss): - yield s - - MODULE = re.compile(r"\w+(\.\w+)*$").match EGG_NAME = re.compile( r""" @@ -2450,7 +2525,7 @@ class EntryPoint: try: return functools.reduce(getattr, self.attrs, module) except AttributeError as exc: - raise ImportError(str(exc)) + raise ImportError(str(exc)) from exc def require(self, env=None, installer=None): if self.extras and not self.dist: @@ -2536,22 +2611,15 @@ class EntryPoint: return maps -def _remove_md5_fragment(location): - if not location: - return '' - parsed = urllib.parse.urlparse(location) - if parsed[-1].startswith('md5='): - return urllib.parse.urlunparse(parsed[:-1] + ('',)) - return location - - def _version_from_file(lines): """ Given an iterable of lines from a Metadata file, return the value of the Version field, if present, or None otherwise. """ + def is_version_line(line): return line.lower().startswith('version:') + version_lines = filter(is_version_line, lines) line = next(iter(version_lines), '') _, _, value = line.partition(':') @@ -2560,12 +2628,19 @@ def _version_from_file(lines): class Distribution: """Wrap an actual or potential sys.path entry w/metadata""" + PKG_INFO = 'PKG-INFO' def __init__( - self, location=None, metadata=None, project_name=None, - version=None, py_version=PY_MAJOR, platform=None, - precedence=EGG_DIST): + self, + location=None, + metadata=None, + project_name=None, + version=None, + py_version=PY_MAJOR, + platform=None, + precedence=EGG_DIST, + ): self.project_name = safe_name(project_name or 'Unknown') if version is not None: self._version = safe_version(version) @@ -2588,8 +2663,13 @@ class Distribution: 'name', 'ver', 'pyver', 'plat' ) return cls( - location, metadata, project_name=project_name, version=version, - py_version=py_version, platform=platform, **kw + location, + metadata, + project_name=project_name, + version=version, + py_version=py_version, + platform=platform, + **kw, )._reload_version() def _reload_version(self): @@ -2598,10 +2678,10 @@ class Distribution: @property def hashcmp(self): return ( - self.parsed_version, + self._forgiving_parsed_version, self.precedence, self.key, - _remove_md5_fragment(self.location), + self.location, self.py_version or '', self.platform or '', ) @@ -2645,48 +2725,55 @@ class Distribution: @property def parsed_version(self): if not hasattr(self, "_parsed_version"): - self._parsed_version = parse_version(self.version) + try: + self._parsed_version = parse_version(self.version) + except packaging.version.InvalidVersion as ex: + info = f"(package: {self.project_name})" + if hasattr(ex, "add_note"): + ex.add_note(info) # PEP 678 + raise + raise 
packaging.version.InvalidVersion(f"{str(ex)} {info}") from None return self._parsed_version - def _warn_legacy_version(self): - LV = packaging.version.LegacyVersion - is_legacy = isinstance(self._parsed_version, LV) - if not is_legacy: - return + @property + def _forgiving_parsed_version(self): + try: + return self.parsed_version + except packaging.version.InvalidVersion as ex: + self._parsed_version = parse_version(_forgiving_version(self.version)) - # While an empty version is technically a legacy version and - # is not a valid PEP 440 version, it's also unlikely to - # actually come from someone and instead it is more likely that - # it comes from setuptools attempting to parse a filename and - # including it in the list. So for that we'll gate this warning - # on if the version is anything at all or not. - if not self.version: - return + notes = "\n".join(getattr(ex, "__notes__", [])) # PEP 678 + msg = f"""!!\n\n + ************************************************************************* + {str(ex)}\n{notes} + + This is a long overdue deprecation. + For the time being, `pkg_resources` will use `{self._parsed_version}` + as a replacement to avoid breaking existing environments, + but no future compatibility is guaranteed. - tmpl = textwrap.dedent(""" - '{project_name} ({version})' is being parsed as a legacy, - non PEP 440, - version. You may find odd behavior and sort order. - In particular it will be sorted as less than 0.0. It - is recommended to migrate to PEP 440 compatible - versions. - """).strip().replace('\n', ' ') + If you maintain package {self.project_name} you should implement + the relevant changes to adequate the project to PEP 440 immediately. + ************************************************************************* + \n\n!! + """ + warnings.warn(msg, DeprecationWarning) - warnings.warn(tmpl.format(**vars(self)), PEP440Warning) + return self._parsed_version @property def version(self): try: return self._version - except AttributeError: + except AttributeError as e: version = self._get_version() if version is None: path = self._get_metadata_path_for_display(self.PKG_INFO) - msg = ( - "Missing 'Version:' header and/or {} file at path: {}" - ).format(self.PKG_INFO, path) - raise ValueError(msg, self) + msg = ("Missing 'Version:' header and/or {} file at path: {}").format( + self.PKG_INFO, path + ) + raise ValueError(msg, self) from e return version @@ -2714,8 +2801,7 @@ class Distribution: reqs = dm.pop(extra) new_extra, _, marker = extra.partition(':') fails_marker = marker and ( - invalid_marker(marker) - or not evaluate_marker(marker) + invalid_marker(marker) or not evaluate_marker(marker) ) if fails_marker: reqs = [] @@ -2739,10 +2825,10 @@ class Distribution: for ext in extras: try: deps.extend(dm[safe_extra(ext)]) - except KeyError: + except KeyError as e: raise UnknownExtra( "%s has no such extra feature %r" % (self, ext) - ) + ) from e return deps def _get_metadata_path_for_display(self, name): @@ -2787,8 +2873,9 @@ class Distribution: def egg_name(self): """Return what this distribution's standard .egg filename should be""" filename = "%s-%s-py%s" % ( - to_filename(self.project_name), to_filename(self.version), - self.py_version or PY_MAJOR + to_filename(self.project_name), + to_filename(self.version), + self.py_version or PY_MAJOR, ) if self.platform: @@ -2818,21 +2905,13 @@ class Distribution: def __dir__(self): return list( set(super(Distribution, self).__dir__()) - | set( - attr for attr in self._provider.__dir__() - if not attr.startswith('_') - ) + | 
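parsed_version above keeps strict parsing but attaches the offending project name to the error: via a PEP 678 note on Python 3.11+, or by re-raising with the context folded into the message on older interpreters. The same pattern stand-alone (parse_strict is a name invented for this sketch; requires the packaging distribution):

    from packaging.version import InvalidVersion, Version

    def parse_strict(version, project_name):
        try:
            return Version(version)
        except InvalidVersion as ex:
            info = f'(package: {project_name})'
            if hasattr(ex, 'add_note'):        # PEP 678, Python 3.11+
                ex.add_note(info)
                raise
            raise InvalidVersion(f'{ex} {info}') from None

    try:
        parse_strict('0.23ubuntu1', 'demo')
    except InvalidVersion as err:
        print(err)   # message includes '(package: demo)' on pre-3.11 interpreters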
set(attr for attr in self._provider.__dir__() if not attr.startswith('_')) ) - if not hasattr(object, '__dir__'): - # python 2.7 not supported - del __dir__ - @classmethod def from_filename(cls, filename, metadata=None, **kw): return cls.from_location( - _normalize_cached(filename), os.path.basename(filename), metadata, - **kw + _normalize_cached(filename), os.path.basename(filename), metadata, **kw ) def as_requirement(self): @@ -2867,7 +2946,8 @@ class Distribution: """Return the EntryPoint object for `group`+`name`, or ``None``""" return self.get_entry_map(group).get(name) - def insert_on(self, path, loc=None, replace=False): + # FIXME: 'Distribution.insert_on' is too complex (13) + def insert_on(self, path, loc=None, replace=False): # noqa: C901 """Ensure self.location is on path If replace=False (default): @@ -2943,14 +3023,18 @@ class Distribution: nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt')) loc = normalize_path(self.location) for modname in self._get_metadata('top_level.txt'): - if (modname not in sys.modules or modname in nsp - or modname in _namespace_packages): + if ( + modname not in sys.modules + or modname in nsp + or modname in _namespace_packages + ): continue if modname in ('pkg_resources', 'setuptools', 'site'): continue fn = getattr(sys.modules[modname], '__file__', None) - if fn and (normalize_path(fn).startswith(loc) or - fn.startswith(self.location)): + if fn and ( + normalize_path(fn).startswith(loc) or fn.startswith(self.location) + ): continue issue_warning( "Module %s was already imported from %s, but %s is being added" @@ -2963,6 +3047,9 @@ class Distribution: except ValueError: issue_warning("Unbuilt egg for " + repr(self)) return False + except SystemError: + # TODO: remove this except clause when python/cpython#103632 is fixed. + return False return True def clone(self, **kw): @@ -3002,6 +3089,7 @@ class DistInfoDistribution(Distribution): Wrap an actual or potential sys.path entry w/metadata, .dist-info style. """ + PKG_INFO = 'METADATA' EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])") @@ -3037,12 +3125,12 @@ class DistInfoDistribution(Distribution): if not req.marker or req.marker.evaluate({'extra': extra}): yield req - common = frozenset(reqs_for_extra(None)) + common = types.MappingProxyType(dict.fromkeys(reqs_for_extra(None))) dm[None].extend(common) for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []: s_extra = safe_extra(extra.strip()) - dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common) + dm[s_extra] = [r for r in reqs_for_extra(extra) if r not in common] return dm @@ -3067,45 +3155,27 @@ def issue_warning(*args, **kw): warnings.warn(stacklevel=level + 1, *args, **kw) -class RequirementParseError(ValueError): - def __str__(self): - return ' '.join(self.args) - - def parse_requirements(strs): - """Yield ``Requirement`` objects for each specification in `strs` + """ + Yield ``Requirement`` objects for each specification in `strs`. `strs` must be a string, or a (possibly-nested) iterable thereof. """ - # create a steppable iterator, so we can handle \-continuations - lines = iter(yield_lines(strs)) + return map(Requirement, join_continuation(map(drop_comment, yield_lines(strs)))) - for line in lines: - # Drop comments -- a hash without a space may be in a URL. - if ' #' in line: - line = line[:line.find(' #')] - # If there is a line continuation, drop it, and append the next line. 
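In _compute_dependencies above, the shared requirements move from a frozenset to a read-only, insertion-ordered mapping: dict.fromkeys deduplicates while preserving declared order, MappingProxyType prevents mutation, and membership tests stay hash-based. The effect in isolation:

    import types

    reqs = ['a', 'b', 'a', 'c']
    common = types.MappingProxyType(dict.fromkeys(reqs))
    print(list(common))    # ['a', 'b', 'c'] -- order kept, duplicates dropped
    print('b' in common)   # True -- constant-time membership, like a set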
- if line.endswith('\\'): - line = line[:-2].strip() - try: - line += next(lines) - except StopIteration: - return - yield Requirement(line) + +class RequirementParseError(packaging.requirements.InvalidRequirement): + "Compatibility wrapper for InvalidRequirement" class Requirement(packaging.requirements.Requirement): def __init__(self, requirement_string): """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!""" - try: - super(Requirement, self).__init__(requirement_string) - except packaging.requirements.InvalidRequirement as e: - raise RequirementParseError(str(e)) + super(Requirement, self).__init__(requirement_string) self.unsafe_name = self.name project_name = safe_name(self.name) self.project_name, self.key = project_name, project_name.lower() - self.specs = [ - (spec.operator, spec.version) for spec in self.specifier] + self.specs = [(spec.operator, spec.version) for spec in self.specifier] self.extras = tuple(map(safe_extra, self.extras)) self.hashCmp = ( self.key, @@ -3117,10 +3187,7 @@ class Requirement(packaging.requirements.Requirement): self.__hash = hash(self.hashCmp) def __eq__(self, other): - return ( - isinstance(other, Requirement) and - self.hashCmp == other.hashCmp - ) + return isinstance(other, Requirement) and self.hashCmp == other.hashCmp def __ne__(self, other): return not self == other @@ -3145,7 +3212,7 @@ class Requirement(packaging.requirements.Requirement): @staticmethod def parse(s): - req, = parse_requirements(s) + (req,) = parse_requirements(s) return req @@ -3170,7 +3237,7 @@ def _find_adapter(registry, ob): def ensure_directory(path): """Ensure that the parent directory of `path` exists""" dirname = os.path.dirname(path) - py31compat.makedirs(dirname, exist_ok=True) + os.makedirs(dirname, exist_ok=True) def _bypass_ensure_directory(path): @@ -3248,6 +3315,15 @@ def _initialize(g=globals()): ) +class PkgResourcesDeprecationWarning(Warning): + """ + Base class for warning about deprecations in ``pkg_resources`` + + This class is not derived from ``DeprecationWarning``, and as such is + visible by default. + """ + + @_call_aside def _initialize_master_working_set(): """ @@ -3274,10 +3350,7 @@ def _initialize_master_working_set(): # ensure that all distributions added to the working set in the future # (e.g. by calling ``require()``) will get activated as well, # with higher priority (replace=True). - tuple( - dist.activate(replace=False) - for dist in working_set - ) + tuple(dist.activate(replace=False) for dist in working_set) add_activation_listener( lambda dist: dist.activate(replace=True), existing=False, @@ -3286,11 +3359,3 @@ def _initialize_master_working_set(): # match order list(map(working_set.add_entry, sys.path)) globals().update(locals()) - -class PkgResourcesDeprecationWarning(Warning): - """ - Base class for warning about deprecations in ``pkg_resources`` - - This class is not derived from ``DeprecationWarning``, and as such is - visible by default. 
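parse_requirements above collapses the old hand-rolled loop into a three-stage pipeline: yield_lines flattens nested input and skips blank and comment lines, drop_comment trims a trailing ' #'-style comment (a bare '#' may sit inside a URL), and join_continuation splices backslash-continued lines. The helpers are imported from outside this hunk (in setuptools they are vendored from jaraco.text); plausible stand-ins for the last two stages:

    def drop_comment(line):
        # Only ' #' starts a comment; '#' alone may be part of a URL.
        return line.partition(' #')[0]

    def join_continuation(lines):
        lines = iter(lines)
        for line in lines:
            while line.endswith('\\'):
                try:
                    # drop the backslash and its padding space, then splice
                    line = line[:-2].strip() + next(lines)
                except StopIteration:
                    return
            yield line

    spec = ['requests>=2.0 # pinned loosely', 'pack \\', 'aging']
    print(list(join_continuation(map(drop_comment, spec))))
    # ['requests>=2.0', 'packaging']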
- """ diff --git a/env/Lib/site-packages/pip/_vendor/pkg_resources/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/pkg_resources/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 7d423192b74f12758665e9019748f490836d70d5..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/pkg_resources/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/pkg_resources/__pycache__/py31compat.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/pkg_resources/__pycache__/py31compat.cpython-39.pyc deleted file mode 100644 index ab37be8e77928bd336e09a63cdc0f0c8da407f7e..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/pkg_resources/__pycache__/py31compat.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/pkg_resources/py31compat.py b/env/Lib/site-packages/pip/_vendor/pkg_resources/py31compat.py deleted file mode 100644 index a2d3007ceb16b0eeb4b1f57361c089558a25daeb..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/pkg_resources/py31compat.py +++ /dev/null @@ -1,23 +0,0 @@ -import os -import errno -import sys - -from pip._vendor import six - - -def _makedirs_31(path, exist_ok=False): - try: - os.makedirs(path) - except OSError as exc: - if not exist_ok or exc.errno != errno.EEXIST: - raise - - -# rely on compatibility behavior until mode considerations -# and exists_ok considerations are disentangled. -# See https://github.com/pypa/setuptools/pull/1083#issuecomment-315168663 -needs_makedirs = ( - six.PY2 or - (3, 4) <= sys.version_info < (3, 4, 1) -) -makedirs = _makedirs_31 if needs_makedirs else os.makedirs diff --git a/env/Lib/site-packages/pip/_vendor/progress/__init__.py b/env/Lib/site-packages/pip/_vendor/progress/__init__.py deleted file mode 100644 index e434c257fefd4eda43f5daf5b8dcf7d4cf9da30b..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/progress/__init__.py +++ /dev/null @@ -1,177 +0,0 @@ -# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com> -# -# Permission to use, copy, modify, and distribute this software for any -# purpose with or without fee is hereby granted, provided that the above -# copyright notice and this permission notice appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -from __future__ import division, print_function - -from collections import deque -from datetime import timedelta -from math import ceil -from sys import stderr -try: - from time import monotonic -except ImportError: - from time import time as monotonic - - -__version__ = '1.5' - -HIDE_CURSOR = '\x1b[?25l' -SHOW_CURSOR = '\x1b[?25h' - - -class Infinite(object): - file = stderr - sma_window = 10 # Simple Moving Average window - check_tty = True - hide_cursor = True - - def __init__(self, message='', **kwargs): - self.index = 0 - self.start_ts = monotonic() - self.avg = 0 - self._avg_update_ts = self.start_ts - self._ts = self.start_ts - self._xput = deque(maxlen=self.sma_window) - for key, val in kwargs.items(): - setattr(self, key, val) - - self._width = 0 - self.message = message - - if self.file and self.is_tty(): - if self.hide_cursor: - print(HIDE_CURSOR, end='', file=self.file) - print(self.message, end='', file=self.file) - self.file.flush() - - def __getitem__(self, key): - if key.startswith('_'): - return None - return getattr(self, key, None) - - @property - def elapsed(self): - return int(monotonic() - self.start_ts) - - @property - def elapsed_td(self): - return timedelta(seconds=self.elapsed) - - def update_avg(self, n, dt): - if n > 0: - xput_len = len(self._xput) - self._xput.append(dt / n) - now = monotonic() - # update when we're still filling _xput, then after every second - if (xput_len < self.sma_window or - now - self._avg_update_ts > 1): - self.avg = sum(self._xput) / len(self._xput) - self._avg_update_ts = now - - def update(self): - pass - - def start(self): - pass - - def clearln(self): - if self.file and self.is_tty(): - print('\r\x1b[K', end='', file=self.file) - - def write(self, s): - if self.file and self.is_tty(): - line = self.message + s.ljust(self._width) - print('\r' + line, end='', file=self.file) - self._width = max(self._width, len(s)) - self.file.flush() - - def writeln(self, line): - if self.file and self.is_tty(): - self.clearln() - print(line, end='', file=self.file) - self.file.flush() - - def finish(self): - if self.file and self.is_tty(): - print(file=self.file) - if self.hide_cursor: - print(SHOW_CURSOR, end='', file=self.file) - - def is_tty(self): - return self.file.isatty() if self.check_tty else True - - def next(self, n=1): - now = monotonic() - dt = now - self._ts - self.update_avg(n, dt) - self._ts = now - self.index = self.index + n - self.update() - - def iter(self, it): - with self: - for x in it: - yield x - self.next() - - def __enter__(self): - self.start() - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.finish() - - -class Progress(Infinite): - def __init__(self, *args, **kwargs): - super(Progress, self).__init__(*args, **kwargs) - self.max = kwargs.get('max', 100) - - @property - def eta(self): - return int(ceil(self.avg * self.remaining)) - - @property - def eta_td(self): - return timedelta(seconds=self.eta) - - @property - def percent(self): - return self.progress * 100 - - @property - def progress(self): - return min(1, self.index / self.max) - - @property - def remaining(self): - return max(self.max - self.index, 0) - - def start(self): - self.update() - - def goto(self, index): - incr = index - self.index - self.next(incr) - - def iter(self, it): - try: - self.max = len(it) - except TypeError: - pass - - with self: - for x in it: - yield x - self.next() diff --git a/env/Lib/site-packages/pip/_vendor/progress/__pycache__/__init__.cpython-39.pyc 
b/env/Lib/site-packages/pip/_vendor/progress/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 59331d34d3f0a9b879d94dabfe954b129ee10bb6..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/progress/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/progress/__pycache__/bar.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/progress/__pycache__/bar.cpython-39.pyc deleted file mode 100644 index 4444a268fe93d187e8fe4761ddd5b1d14c129255..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/progress/__pycache__/bar.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/progress/__pycache__/counter.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/progress/__pycache__/counter.cpython-39.pyc deleted file mode 100644 index 801c1561f9475b6ca748f056260878c085e13375..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/progress/__pycache__/counter.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/progress/__pycache__/spinner.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/progress/__pycache__/spinner.cpython-39.pyc deleted file mode 100644 index ba7ea893a261c2e9462a7a6c31be6a1b84b4a289..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/progress/__pycache__/spinner.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/progress/bar.py b/env/Lib/site-packages/pip/_vendor/progress/bar.py deleted file mode 100644 index 8819efda65b9f6bf1315ad15bedc5a3857bf67fc..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/progress/bar.py +++ /dev/null @@ -1,91 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com> -# -# Permission to use, copy, modify, and distribute this software for any -# purpose with or without fee is hereby granted, provided that the above -# copyright notice and this permission notice appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -from __future__ import unicode_literals - -import sys - -from . 
import Progress - - -class Bar(Progress): - width = 32 - suffix = '%(index)d/%(max)d' - bar_prefix = ' |' - bar_suffix = '| ' - empty_fill = ' ' - fill = '#' - - def update(self): - filled_length = int(self.width * self.progress) - empty_length = self.width - filled_length - - message = self.message % self - bar = self.fill * filled_length - empty = self.empty_fill * empty_length - suffix = self.suffix % self - line = ''.join([message, self.bar_prefix, bar, empty, self.bar_suffix, - suffix]) - self.writeln(line) - - -class ChargingBar(Bar): - suffix = '%(percent)d%%' - bar_prefix = ' ' - bar_suffix = ' ' - empty_fill = '∙' - fill = '█' - - -class FillingSquaresBar(ChargingBar): - empty_fill = '▢' - fill = '▣' - - -class FillingCirclesBar(ChargingBar): - empty_fill = '◯' - fill = '◉' - - -class IncrementalBar(Bar): - if sys.platform.startswith('win'): - phases = (u' ', u'▌', u'█') - else: - phases = (' ', '▏', '▎', '▍', '▌', '▋', '▊', '▉', '█') - - def update(self): - nphases = len(self.phases) - filled_len = self.width * self.progress - nfull = int(filled_len) # Number of full chars - phase = int((filled_len - nfull) * nphases) # Phase of last char - nempty = self.width - nfull # Number of empty chars - - message = self.message % self - bar = self.phases[-1] * nfull - current = self.phases[phase] if phase > 0 else '' - empty = self.empty_fill * max(0, nempty - len(current)) - suffix = self.suffix % self - line = ''.join([message, self.bar_prefix, bar, current, empty, - self.bar_suffix, suffix]) - self.writeln(line) - - -class PixelBar(IncrementalBar): - phases = ('⡀', '⡄', '⡆', '⡇', '⣇', '⣧', '⣷', '⣿') - - -class ShadyBar(IncrementalBar): - phases = (' ', '░', '▒', '▓', '█') diff --git a/env/Lib/site-packages/pip/_vendor/progress/counter.py b/env/Lib/site-packages/pip/_vendor/progress/counter.py deleted file mode 100644 index d955ca4771bb2cd78a8e9334a6917f9f6cce59b1..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/progress/counter.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com> -# -# Permission to use, copy, modify, and distribute this software for any -# purpose with or without fee is hereby granted, provided that the above -# copyright notice and this permission notice appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -from __future__ import unicode_literals -from . 
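IncrementalBar.update in the file being deleted here draws a partial cell by indexing into phases: the fractional part of width * progress selects one of the intermediate block glyphs. The arithmetic in isolation:

    phases = (' ', '▏', '▎', '▍', '▌', '▋', '▊', '▉', '█')
    width, progress = 32, 0.42

    filled = width * progress                      # 13.44 cells
    nfull = int(filled)                            # 13 fully-drawn cells
    phase = int((filled - nfull) * len(phases))    # glyph for the 0.44 remainder
    print('█' * nfull + (phases[phase] if phase else ''))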
import Infinite, Progress - - -class Counter(Infinite): - def update(self): - self.write(str(self.index)) - - -class Countdown(Progress): - def update(self): - self.write(str(self.remaining)) - - -class Stack(Progress): - phases = (' ', '▁', '▂', '▃', '▄', '▅', '▆', '▇', '█') - - def update(self): - nphases = len(self.phases) - i = min(nphases - 1, int(self.progress * nphases)) - self.write(self.phases[i]) - - -class Pie(Stack): - phases = ('○', '◔', '◑', '◕', '●') diff --git a/env/Lib/site-packages/pip/_vendor/progress/spinner.py b/env/Lib/site-packages/pip/_vendor/progress/spinner.py deleted file mode 100644 index 4e100cabb9b8d7113111d1b09a7b84da2895e2a0..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/progress/spinner.py +++ /dev/null @@ -1,43 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com> -# -# Permission to use, copy, modify, and distribute this software for any -# purpose with or without fee is hereby granted, provided that the above -# copyright notice and this permission notice appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -from __future__ import unicode_literals -from . import Infinite - - -class Spinner(Infinite): - phases = ('-', '\\', '|', '/') - hide_cursor = True - - def update(self): - i = self.index % len(self.phases) - self.write(self.phases[i]) - - -class PieSpinner(Spinner): - phases = ['◷', '◶', '◵', '◴'] - - -class MoonSpinner(Spinner): - phases = ['◑', '◒', '◐', '◓'] - - -class LineSpinner(Spinner): - phases = ['⎺', '⎻', '⎼', '⎽', '⎼', '⎻'] - - -class PixelSpinner(Spinner): - phases = ['⣾', '⣷', '⣯', '⣟', '⡿', '⢿', '⣻', '⣽'] diff --git a/env/Lib/site-packages/pip/_vendor/pyparsing.py b/env/Lib/site-packages/pip/_vendor/pyparsing.py deleted file mode 100644 index 7ebc7eb994bba9ed32f30d557af93627cae4d543..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/pyparsing.py +++ /dev/null @@ -1,7107 +0,0 @@ -# -*- coding: utf-8 -*- -# module pyparsing.py -# -# Copyright (c) 2003-2019 Paul T. McGuire -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
-# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -# - -__doc__ = \ -""" -pyparsing module - Classes and methods to define and execute parsing grammars -============================================================================= - -The pyparsing module is an alternative approach to creating and -executing simple grammars, vs. the traditional lex/yacc approach, or the -use of regular expressions. With pyparsing, you don't need to learn -a new syntax for defining grammars or matching expressions - the parsing -module provides a library of classes that you use to construct the -grammar directly in Python. - -Here is a program to parse "Hello, World!" (or any greeting of the form -``"<salutation>, <addressee>!"``), built up using :class:`Word`, -:class:`Literal`, and :class:`And` elements -(the :class:`'+'<ParserElement.__add__>` operators create :class:`And` expressions, -and the strings are auto-converted to :class:`Literal` expressions):: - - from pip._vendor.pyparsing import Word, alphas - - # define grammar of a greeting - greet = Word(alphas) + "," + Word(alphas) + "!" - - hello = "Hello, World!" - print (hello, "->", greet.parseString(hello)) - -The program outputs the following:: - - Hello, World! -> ['Hello', ',', 'World', '!'] - -The Python representation of the grammar is quite readable, owing to the -self-explanatory class names, and the use of '+', '|' and '^' operators. - -The :class:`ParseResults` object returned from -:class:`ParserElement.parseString` can be -accessed as a nested list, a dictionary, or an object with named -attributes. - -The pyparsing module handles some of the problems that are typically -vexing when writing text parsers: - - - extra or missing whitespace (the above program will also handle - "Hello,World!", "Hello , World !", etc.) - - quoted strings - - embedded comments - - -Getting Started - ------------------ -Visit the classes :class:`ParserElement` and :class:`ParseResults` to -see the base classes that most other pyparsing -classes inherit from. 
Use the docstrings for examples of how to: - - - construct literal match expressions from :class:`Literal` and - :class:`CaselessLiteral` classes - - construct character word-group expressions using the :class:`Word` - class - - see how to create repetitive expressions using :class:`ZeroOrMore` - and :class:`OneOrMore` classes - - use :class:`'+'<And>`, :class:`'|'<MatchFirst>`, :class:`'^'<Or>`, - and :class:`'&'<Each>` operators to combine simple expressions into - more complex ones - - associate names with your parsed results using - :class:`ParserElement.setResultsName` - - access the parsed data, which is returned as a :class:`ParseResults` - object - - find some helpful expression short-cuts like :class:`delimitedList` - and :class:`oneOf` - - find more useful common expressions in the :class:`pyparsing_common` - namespace class -""" - -__version__ = "2.4.7" -__versionTime__ = "30 Mar 2020 00:43 UTC" -__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>" - -import string -from weakref import ref as wkref -import copy -import sys -import warnings -import re -import sre_constants -import collections -import pprint -import traceback -import types -from datetime import datetime -from operator import itemgetter -import itertools -from functools import wraps -from contextlib import contextmanager - -try: - # Python 3 - from itertools import filterfalse -except ImportError: - from itertools import ifilterfalse as filterfalse - -try: - from _thread import RLock -except ImportError: - from threading import RLock - -try: - # Python 3 - from collections.abc import Iterable - from collections.abc import MutableMapping, Mapping -except ImportError: - # Python 2.7 - from collections import Iterable - from collections import MutableMapping, Mapping - -try: - from collections import OrderedDict as _OrderedDict -except ImportError: - try: - from ordereddict import OrderedDict as _OrderedDict - except ImportError: - _OrderedDict = None - -try: - from types import SimpleNamespace -except ImportError: - class SimpleNamespace: pass - -# version compatibility configuration -__compat__ = SimpleNamespace() -__compat__.__doc__ = """ - A cross-version compatibility configuration for pyparsing features that will be - released in a future version. By setting values in this configuration to True, - those features can be enabled in prior versions for compatibility development - and testing. 
- - - collect_all_And_tokens - flag to enable fix for Issue #63 that fixes erroneous grouping - of results names when an And expression is nested within an Or or MatchFirst; set to - True to enable bugfix released in pyparsing 2.3.0, or False to preserve - pre-2.3.0 handling of named results -""" -__compat__.collect_all_And_tokens = True - -__diag__ = SimpleNamespace() -__diag__.__doc__ = """ -Diagnostic configuration (all default to False) - - warn_multiple_tokens_in_named_alternation - flag to enable warnings when a results - name is defined on a MatchFirst or Or expression with one or more And subexpressions - (only warns if __compat__.collect_all_And_tokens is False) - - warn_ungrouped_named_tokens_in_collection - flag to enable warnings when a results - name is defined on a containing expression with ungrouped subexpressions that also - have results names - - warn_name_set_on_empty_Forward - flag to enable warnings whan a Forward is defined - with a results name, but has no contents defined - - warn_on_multiple_string_args_to_oneof - flag to enable warnings whan oneOf is - incorrectly called with multiple str arguments - - enable_debug_on_named_expressions - flag to auto-enable debug on all subsequent - calls to ParserElement.setName() -""" -__diag__.warn_multiple_tokens_in_named_alternation = False -__diag__.warn_ungrouped_named_tokens_in_collection = False -__diag__.warn_name_set_on_empty_Forward = False -__diag__.warn_on_multiple_string_args_to_oneof = False -__diag__.enable_debug_on_named_expressions = False -__diag__._all_names = [nm for nm in vars(__diag__) if nm.startswith("enable_") or nm.startswith("warn_")] - -def _enable_all_warnings(): - __diag__.warn_multiple_tokens_in_named_alternation = True - __diag__.warn_ungrouped_named_tokens_in_collection = True - __diag__.warn_name_set_on_empty_Forward = True - __diag__.warn_on_multiple_string_args_to_oneof = True -__diag__.enable_all_warnings = _enable_all_warnings - - -__all__ = ['__version__', '__versionTime__', '__author__', '__compat__', '__diag__', - 'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty', - 'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal', - 'PrecededBy', 'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or', - 'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException', - 'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException', - 'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', - 'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore', 'Char', - 'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col', - 'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString', - 'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums', - 'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno', - 'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral', - 'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables', - 'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity', - 'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd', - 'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute', - 'indentedBlock', 'originalTextFor', 'ungroup', 
'infixNotation', 'locatedExpr', 'withClass', - 'CloseMatch', 'tokenMap', 'pyparsing_common', 'pyparsing_unicode', 'unicode_set', - 'conditionAsParseAction', 're', - ] - -system_version = tuple(sys.version_info)[:3] -PY_3 = system_version[0] == 3 -if PY_3: - _MAX_INT = sys.maxsize - basestring = str - unichr = chr - unicode = str - _ustr = str - - # build list of single arg builtins, that can be used as parse actions - singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max] - -else: - _MAX_INT = sys.maxint - range = xrange - - def _ustr(obj): - """Drop-in replacement for str(obj) that tries to be Unicode - friendly. It first tries str(obj). If that fails with - a UnicodeEncodeError, then it tries unicode(obj). It then - < returns the unicode object | encodes it with the default - encoding | ... >. - """ - if isinstance(obj, unicode): - return obj - - try: - # If this works, then _ustr(obj) has the same behaviour as str(obj), so - # it won't break any existing code. - return str(obj) - - except UnicodeEncodeError: - # Else encode it - ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace') - xmlcharref = Regex(r'&#\d+;') - xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:]) - return xmlcharref.transformString(ret) - - # build list of single arg builtins, tolerant of Python version, that can be used as parse actions - singleArgBuiltins = [] - import __builtin__ - - for fname in "sum len sorted reversed list tuple set any all min max".split(): - try: - singleArgBuiltins.append(getattr(__builtin__, fname)) - except AttributeError: - continue - -_generatorType = type((y for y in range(1))) - -def _xml_escape(data): - """Escape &, <, >, ", ', etc. in a string of data.""" - - # ampersand must be replaced first - from_symbols = '&><"\'' - to_symbols = ('&' + s + ';' for s in "amp gt lt quot apos".split()) - for from_, to_ in zip(from_symbols, to_symbols): - data = data.replace(from_, to_) - return data - -alphas = string.ascii_uppercase + string.ascii_lowercase -nums = "0123456789" -hexnums = nums + "ABCDEFabcdef" -alphanums = alphas + nums -_bslash = chr(92) -printables = "".join(c for c in string.printable if c not in string.whitespace) - - -def conditionAsParseAction(fn, message=None, fatal=False): - msg = message if message is not None else "failed user-defined condition" - exc_type = ParseFatalException if fatal else ParseException - fn = _trim_arity(fn) - - @wraps(fn) - def pa(s, l, t): - if not bool(fn(s, l, t)): - raise exc_type(s, l, msg) - - return pa - -class ParseBaseException(Exception): - """base exception class for all parsing runtime exceptions""" - # Performance tuning: we construct a *lot* of these, so keep this - # constructor as small and fast as possible - def __init__(self, pstr, loc=0, msg=None, elem=None): - self.loc = loc - if msg is None: - self.msg = pstr - self.pstr = "" - else: - self.msg = msg - self.pstr = pstr - self.parserElement = elem - self.args = (pstr, loc, msg) - - @classmethod - def _from_exception(cls, pe): - """ - internal factory method to simplify creating one type of ParseException - from another - avoids having __init__ signature conflicts among subclasses - """ - return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement) - - def __getattr__(self, aname): - """supported attributes by name are: - - lineno - returns the line number of the exception text - - col - returns the column number of the exception text - - line - returns the line containing the exception text - """ - if aname == 
"lineno": - return lineno(self.loc, self.pstr) - elif aname in ("col", "column"): - return col(self.loc, self.pstr) - elif aname == "line": - return line(self.loc, self.pstr) - else: - raise AttributeError(aname) - - def __str__(self): - if self.pstr: - if self.loc >= len(self.pstr): - foundstr = ', found end of text' - else: - foundstr = (', found %r' % self.pstr[self.loc:self.loc + 1]).replace(r'\\', '\\') - else: - foundstr = '' - return ("%s%s (at char %d), (line:%d, col:%d)" % - (self.msg, foundstr, self.loc, self.lineno, self.column)) - def __repr__(self): - return _ustr(self) - def markInputline(self, markerString=">!<"): - """Extracts the exception line from the input string, and marks - the location of the exception with a special symbol. - """ - line_str = self.line - line_column = self.column - 1 - if markerString: - line_str = "".join((line_str[:line_column], - markerString, line_str[line_column:])) - return line_str.strip() - def __dir__(self): - return "lineno col line".split() + dir(type(self)) - -class ParseException(ParseBaseException): - """ - Exception thrown when parse expressions don't match class; - supported attributes by name are: - - lineno - returns the line number of the exception text - - col - returns the column number of the exception text - - line - returns the line containing the exception text - - Example:: - - try: - Word(nums).setName("integer").parseString("ABC") - except ParseException as pe: - print(pe) - print("column: {}".format(pe.col)) - - prints:: - - Expected integer (at char 0), (line:1, col:1) - column: 1 - - """ - - @staticmethod - def explain(exc, depth=16): - """ - Method to take an exception and translate the Python internal traceback into a list - of the pyparsing expressions that caused the exception to be raised. - - Parameters: - - - exc - exception raised during parsing (need not be a ParseException, in support - of Python exceptions that might be raised in a parse action) - - depth (default=16) - number of levels back in the stack trace to list expression - and function names; if None, the full stack trace names will be listed; if 0, only - the failing input line, marker, and exception string will be shown - - Returns a multi-line string listing the ParserElements and/or function names in the - exception's stack trace. - - Note: the diagnostic output will include string representations of the expressions - that failed to parse. These representations will be more helpful if you use `setName` to - give identifiable names to your expressions. Otherwise they will use the default string - forms, which may be cryptic to read. - - explain() is only supported under Python 3. 
- """ - import inspect - - if depth is None: - depth = sys.getrecursionlimit() - ret = [] - if isinstance(exc, ParseBaseException): - ret.append(exc.line) - ret.append(' ' * (exc.col - 1) + '^') - ret.append("{0}: {1}".format(type(exc).__name__, exc)) - - if depth > 0: - callers = inspect.getinnerframes(exc.__traceback__, context=depth) - seen = set() - for i, ff in enumerate(callers[-depth:]): - frm = ff[0] - - f_self = frm.f_locals.get('self', None) - if isinstance(f_self, ParserElement): - if frm.f_code.co_name not in ('parseImpl', '_parseNoCache'): - continue - if f_self in seen: - continue - seen.add(f_self) - - self_type = type(f_self) - ret.append("{0}.{1} - {2}".format(self_type.__module__, - self_type.__name__, - f_self)) - elif f_self is not None: - self_type = type(f_self) - ret.append("{0}.{1}".format(self_type.__module__, - self_type.__name__)) - else: - code = frm.f_code - if code.co_name in ('wrapper', '<module>'): - continue - - ret.append("{0}".format(code.co_name)) - - depth -= 1 - if not depth: - break - - return '\n'.join(ret) - - -class ParseFatalException(ParseBaseException): - """user-throwable exception thrown when inconsistent parse content - is found; stops all parsing immediately""" - pass - -class ParseSyntaxException(ParseFatalException): - """just like :class:`ParseFatalException`, but thrown internally - when an :class:`ErrorStop<And._ErrorStop>` ('-' operator) indicates - that parsing is to stop immediately because an unbacktrackable - syntax error has been found. - """ - pass - -#~ class ReparseException(ParseBaseException): - #~ """Experimental class - parse actions can raise this exception to cause - #~ pyparsing to reparse the input string: - #~ - with a modified input string, and/or - #~ - with a modified start location - #~ Set the values of the ReparseException in the constructor, and raise the - #~ exception in a parse action to cause pyparsing to use the new string/location. - #~ Setting the values as None causes no change to be made. - #~ """ - #~ def __init_( self, newstring, restartLoc ): - #~ self.newParseText = newstring - #~ self.reparseLoc = restartLoc - -class RecursiveGrammarException(Exception): - """exception thrown by :class:`ParserElement.validate` if the - grammar could be improperly recursive - """ - def __init__(self, parseElementList): - self.parseElementTrace = parseElementList - - def __str__(self): - return "RecursiveGrammarException: %s" % self.parseElementTrace - -class _ParseResultsWithOffset(object): - def __init__(self, p1, p2): - self.tup = (p1, p2) - def __getitem__(self, i): - return self.tup[i] - def __repr__(self): - return repr(self.tup[0]) - def setOffset(self, i): - self.tup = (self.tup[0], i) - -class ParseResults(object): - """Structured parse results, to provide multiple means of access to - the parsed data: - - - as a list (``len(results)``) - - by list index (``results[0], results[1]``, etc.) 
- - by attribute (``results.<resultsName>`` - see :class:`ParserElement.setResultsName`) - - Example:: - - integer = Word(nums) - date_str = (integer.setResultsName("year") + '/' - + integer.setResultsName("month") + '/' - + integer.setResultsName("day")) - # equivalent form: - # date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - # parseString returns a ParseResults object - result = date_str.parseString("1999/12/31") - - def test(s, fn=repr): - print("%s -> %s" % (s, fn(eval(s)))) - test("list(result)") - test("result[0]") - test("result['month']") - test("result.day") - test("'month' in result") - test("'minutes' in result") - test("result.dump()", str) - - prints:: - - list(result) -> ['1999', '/', '12', '/', '31'] - result[0] -> '1999' - result['month'] -> '12' - result.day -> '31' - 'month' in result -> True - 'minutes' in result -> False - result.dump() -> ['1999', '/', '12', '/', '31'] - - day: 31 - - month: 12 - - year: 1999 - """ - def __new__(cls, toklist=None, name=None, asList=True, modal=True): - if isinstance(toklist, cls): - return toklist - retobj = object.__new__(cls) - retobj.__doinit = True - return retobj - - # Performance tuning: we construct a *lot* of these, so keep this - # constructor as small and fast as possible - def __init__(self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance): - if self.__doinit: - self.__doinit = False - self.__name = None - self.__parent = None - self.__accumNames = {} - self.__asList = asList - self.__modal = modal - if toklist is None: - toklist = [] - if isinstance(toklist, list): - self.__toklist = toklist[:] - elif isinstance(toklist, _generatorType): - self.__toklist = list(toklist) - else: - self.__toklist = [toklist] - self.__tokdict = dict() - - if name is not None and name: - if not modal: - self.__accumNames[name] = 0 - if isinstance(name, int): - name = _ustr(name) # will always return a str, but use _ustr for consistency - self.__name = name - if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None, '', [])): - if isinstance(toklist, basestring): - toklist = [toklist] - if asList: - if isinstance(toklist, ParseResults): - self[name] = _ParseResultsWithOffset(ParseResults(toklist.__toklist), 0) - else: - self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]), 0) - self[name].__name = name - else: - try: - self[name] = toklist[0] - except (KeyError, TypeError, IndexError): - self[name] = toklist - - def __getitem__(self, i): - if isinstance(i, (int, slice)): - return self.__toklist[i] - else: - if i not in self.__accumNames: - return self.__tokdict[i][-1][0] - else: - return ParseResults([v[0] for v in self.__tokdict[i]]) - - def __setitem__(self, k, v, isinstance=isinstance): - if isinstance(v, _ParseResultsWithOffset): - self.__tokdict[k] = self.__tokdict.get(k, list()) + [v] - sub = v[0] - elif isinstance(k, (int, slice)): - self.__toklist[k] = v - sub = v - else: - self.__tokdict[k] = self.__tokdict.get(k, list()) + [_ParseResultsWithOffset(v, 0)] - sub = v - if isinstance(sub, ParseResults): - sub.__parent = wkref(self) - - def __delitem__(self, i): - if isinstance(i, (int, slice)): - mylen = len(self.__toklist) - del self.__toklist[i] - - # convert int to slice - if isinstance(i, int): - if i < 0: - i += mylen - i = slice(i, i + 1) - # get removed indices - removed = list(range(*i.indices(mylen))) - removed.reverse() - # fixup indices in token dictionary - for name, occurrences in self.__tokdict.items(): - for j in removed: - for k, (value, 
position) in enumerate(occurrences): - occurrences[k] = _ParseResultsWithOffset(value, position - (position > j)) - else: - del self.__tokdict[i] - - def __contains__(self, k): - return k in self.__tokdict - - def __len__(self): - return len(self.__toklist) - - def __bool__(self): - return (not not self.__toklist) - __nonzero__ = __bool__ - - def __iter__(self): - return iter(self.__toklist) - - def __reversed__(self): - return iter(self.__toklist[::-1]) - - def _iterkeys(self): - if hasattr(self.__tokdict, "iterkeys"): - return self.__tokdict.iterkeys() - else: - return iter(self.__tokdict) - - def _itervalues(self): - return (self[k] for k in self._iterkeys()) - - def _iteritems(self): - return ((k, self[k]) for k in self._iterkeys()) - - if PY_3: - keys = _iterkeys - """Returns an iterator of all named result keys.""" - - values = _itervalues - """Returns an iterator of all named result values.""" - - items = _iteritems - """Returns an iterator of all named result key-value tuples.""" - - else: - iterkeys = _iterkeys - """Returns an iterator of all named result keys (Python 2.x only).""" - - itervalues = _itervalues - """Returns an iterator of all named result values (Python 2.x only).""" - - iteritems = _iteritems - """Returns an iterator of all named result key-value tuples (Python 2.x only).""" - - def keys(self): - """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x).""" - return list(self.iterkeys()) - - def values(self): - """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x).""" - return list(self.itervalues()) - - def items(self): - """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x).""" - return list(self.iteritems()) - - def haskeys(self): - """Since keys() returns an iterator, this method is helpful in bypassing - code that looks for the existence of any defined results names.""" - return bool(self.__tokdict) - - def pop(self, *args, **kwargs): - """ - Removes and returns item at specified index (default= ``last``). - Supports both ``list`` and ``dict`` semantics for ``pop()``. If - passed no argument or an integer argument, it will use ``list`` - semantics and pop tokens from the list of parsed tokens. If passed - a non-integer argument (most likely a string), it will use ``dict`` - semantics and pop the corresponding value from any defined results - names. A second default return value argument is supported, just as in - ``dict.pop()``. 
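For instance, a quick sketch of the dict-style form with a default (same pyparsing 2.4.x package assumption as above):

    from pyparsing import Word, alphas, nums

    patt = Word(alphas)("label") + Word(nums)
    result = patt.parseString("AAB 123")
    print(result.pop("label"))            # dict semantics: removes and returns 'AAB'
    print(result.pop("missing", "n/a"))   # absent name falls back to the default -> 'n/a'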
- - Example:: - - def remove_first(tokens): - tokens.pop(0) - print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] - print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321'] - - label = Word(alphas) - patt = label("LABEL") + OneOrMore(Word(nums)) - print(patt.parseString("AAB 123 321").dump()) - - # Use pop() in a parse action to remove named result (note that corresponding value is not - # removed from list form of results) - def remove_LABEL(tokens): - tokens.pop("LABEL") - return tokens - patt.addParseAction(remove_LABEL) - print(patt.parseString("AAB 123 321").dump()) - - prints:: - - ['AAB', '123', '321'] - - LABEL: AAB - - ['AAB', '123', '321'] - """ - if not args: - args = [-1] - for k, v in kwargs.items(): - if k == 'default': - args = (args[0], v) - else: - raise TypeError("pop() got an unexpected keyword argument '%s'" % k) - if (isinstance(args[0], int) - or len(args) == 1 - or args[0] in self): - index = args[0] - ret = self[index] - del self[index] - return ret - else: - defaultvalue = args[1] - return defaultvalue - - def get(self, key, defaultValue=None): - """ - Returns named result matching the given key, or if there is no - such name, then returns the given ``defaultValue`` or ``None`` if no - ``defaultValue`` is specified. - - Similar to ``dict.get()``. - - Example:: - - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - result = date_str.parseString("1999/12/31") - print(result.get("year")) # -> '1999' - print(result.get("hour", "not specified")) # -> 'not specified' - print(result.get("hour")) # -> None - """ - if key in self: - return self[key] - else: - return defaultValue - - def insert(self, index, insStr): - """ - Inserts new element at location index in the list of parsed tokens. - - Similar to ``list.insert()``. - - Example:: - - print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] - - # use a parse action to insert the parse location in the front of the parsed results - def insert_locn(locn, tokens): - tokens.insert(0, locn) - print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321'] - """ - self.__toklist.insert(index, insStr) - # fixup indices in token dictionary - for name, occurrences in self.__tokdict.items(): - for k, (value, position) in enumerate(occurrences): - occurrences[k] = _ParseResultsWithOffset(value, position + (position > index)) - - def append(self, item): - """ - Add single element to end of ParseResults list of elements. - - Example:: - - print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] - - # use a parse action to compute the sum of the parsed integers, and add it to the end - def append_sum(tokens): - tokens.append(sum(map(int, tokens))) - print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444] - """ - self.__toklist.append(item) - - def extend(self, itemseq): - """ - Add sequence of elements to end of ParseResults list of elements. 
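A small sketch of extending with a plain sequence, using hypothetical values (same package assumption):

    from pyparsing import ParseResults

    r = ParseResults(['a'])
    r.extend(['b', 'c'])    # plain sequences are appended to the token list
    print(r.asList())       # -> ['a', 'b', 'c']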
- - Example:: - - patt = OneOrMore(Word(alphas)) - - # use a parse action to append the reverse of the matched strings, to make a palindrome - def make_palindrome(tokens): - tokens.extend(reversed([t[::-1] for t in tokens])) - return ''.join(tokens) - print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl' - """ - if isinstance(itemseq, ParseResults): - self.__iadd__(itemseq) - else: - self.__toklist.extend(itemseq) - - def clear(self): - """ - Clear all elements and results names. - """ - del self.__toklist[:] - self.__tokdict.clear() - - def __getattr__(self, name): - try: - return self[name] - except KeyError: - return "" - - def __add__(self, other): - ret = self.copy() - ret += other - return ret - - def __iadd__(self, other): - if other.__tokdict: - offset = len(self.__toklist) - addoffset = lambda a: offset if a < 0 else a + offset - otheritems = other.__tokdict.items() - otherdictitems = [(k, _ParseResultsWithOffset(v[0], addoffset(v[1]))) - for k, vlist in otheritems for v in vlist] - for k, v in otherdictitems: - self[k] = v - if isinstance(v[0], ParseResults): - v[0].__parent = wkref(self) - - self.__toklist += other.__toklist - self.__accumNames.update(other.__accumNames) - return self - - def __radd__(self, other): - if isinstance(other, int) and other == 0: - # useful for merging many ParseResults using sum() builtin - return self.copy() - else: - # this may raise a TypeError - so be it - return other + self - - def __repr__(self): - return "(%s, %s)" % (repr(self.__toklist), repr(self.__tokdict)) - - def __str__(self): - return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']' - - def _asStringList(self, sep=''): - out = [] - for item in self.__toklist: - if out and sep: - out.append(sep) - if isinstance(item, ParseResults): - out += item._asStringList() - else: - out.append(_ustr(item)) - return out - - def asList(self): - """ - Returns the parse results as a nested list of matching tokens, all converted to strings. - - Example:: - - patt = OneOrMore(Word(alphas)) - result = patt.parseString("sldkj lsdkj sldkj") - # even though the result prints in string-like form, it is actually a pyparsing ParseResults - print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj'] - - # Use asList() to create an actual list - result_list = result.asList() - print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj'] - """ - return [res.asList() if isinstance(res, ParseResults) else res for res in self.__toklist] - - def asDict(self): - """ - Returns the named parse results as a nested dictionary. - - Example:: - - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - result = date_str.parseString('12/31/1999') - print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]}) - - result_dict = result.asDict() - print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'} - - # even though a ParseResults supports dict-like access, sometime you just need to have a dict - import json - print(json.dumps(result)) # -> Exception: TypeError: ... 
is not JSON serializable - print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"} - """ - if PY_3: - item_fn = self.items - else: - item_fn = self.iteritems - - def toItem(obj): - if isinstance(obj, ParseResults): - if obj.haskeys(): - return obj.asDict() - else: - return [toItem(v) for v in obj] - else: - return obj - - return dict((k, toItem(v)) for k, v in item_fn()) - - def copy(self): - """ - Returns a new copy of a :class:`ParseResults` object. - """ - ret = ParseResults(self.__toklist) - ret.__tokdict = dict(self.__tokdict.items()) - ret.__parent = self.__parent - ret.__accumNames.update(self.__accumNames) - ret.__name = self.__name - return ret - - def asXML(self, doctag=None, namedItemsOnly=False, indent="", formatted=True): - """ - (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names. - """ - nl = "\n" - out = [] - namedItems = dict((v[1], k) for (k, vlist) in self.__tokdict.items() - for v in vlist) - nextLevelIndent = indent + " " - - # collapse out indents if formatting is not desired - if not formatted: - indent = "" - nextLevelIndent = "" - nl = "" - - selfTag = None - if doctag is not None: - selfTag = doctag - else: - if self.__name: - selfTag = self.__name - - if not selfTag: - if namedItemsOnly: - return "" - else: - selfTag = "ITEM" - - out += [nl, indent, "<", selfTag, ">"] - - for i, res in enumerate(self.__toklist): - if isinstance(res, ParseResults): - if i in namedItems: - out += [res.asXML(namedItems[i], - namedItemsOnly and doctag is None, - nextLevelIndent, - formatted)] - else: - out += [res.asXML(None, - namedItemsOnly and doctag is None, - nextLevelIndent, - formatted)] - else: - # individual token, see if there is a name for it - resTag = None - if i in namedItems: - resTag = namedItems[i] - if not resTag: - if namedItemsOnly: - continue - else: - resTag = "ITEM" - xmlBodyText = _xml_escape(_ustr(res)) - out += [nl, nextLevelIndent, "<", resTag, ">", - xmlBodyText, - "</", resTag, ">"] - - out += [nl, indent, "</", selfTag, ">"] - return "".join(out) - - def __lookup(self, sub): - for k, vlist in self.__tokdict.items(): - for v, loc in vlist: - if sub is v: - return k - return None - - def getName(self): - r""" - Returns the results name for this token expression. Useful when several - different expressions might match at a particular location. - - Example:: - - integer = Word(nums) - ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d") - house_number_expr = Suppress('#') + Word(nums, alphanums) - user_data = (Group(house_number_expr)("house_number") - | Group(ssn_expr)("ssn") - | Group(integer)("age")) - user_info = OneOrMore(user_data) - - result = user_info.parseString("22 111-22-3333 #221B") - for item in result: - print(item.getName(), ':', item[0]) - - prints:: - - age : 22 - ssn : 111-22-3333 - house_number : 221B - """ - if self.__name: - return self.__name - elif self.__parent: - par = self.__parent() - if par: - return par.__lookup(self) - else: - return None - elif (len(self) == 1 - and len(self.__tokdict) == 1 - and next(iter(self.__tokdict.values()))[0][1] in (0, -1)): - return next(iter(self.__tokdict.keys())) - else: - return None - - def dump(self, indent='', full=True, include_list=True, _depth=0): - """ - Diagnostic method for listing out the contents of - a :class:`ParseResults`. Accepts an optional ``indent`` argument so - that this string can be embedded in a nested display of other data. 
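For instance, a short sketch of the ``indent`` argument (same package assumption):

    from pyparsing import Word, nums

    integer = Word(nums)
    date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
    result = date_str.parseString('12/31/1999')
    print(result.dump(indent='    '))   # every line of the dump is prefixed with four spaces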
- - Example:: - - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - result = date_str.parseString('12/31/1999') - print(result.dump()) - - prints:: - - ['12', '/', '31', '/', '1999'] - - day: 1999 - - month: 31 - - year: 12 - """ - out = [] - NL = '\n' - if include_list: - out.append(indent + _ustr(self.asList())) - else: - out.append('') - - if full: - if self.haskeys(): - items = sorted((str(k), v) for k, v in self.items()) - for k, v in items: - if out: - out.append(NL) - out.append("%s%s- %s: " % (indent, (' ' * _depth), k)) - if isinstance(v, ParseResults): - if v: - out.append(v.dump(indent=indent, full=full, include_list=include_list, _depth=_depth + 1)) - else: - out.append(_ustr(v)) - else: - out.append(repr(v)) - elif any(isinstance(vv, ParseResults) for vv in self): - v = self - for i, vv in enumerate(v): - if isinstance(vv, ParseResults): - out.append("\n%s%s[%d]:\n%s%s%s" % (indent, - (' ' * (_depth)), - i, - indent, - (' ' * (_depth + 1)), - vv.dump(indent=indent, - full=full, - include_list=include_list, - _depth=_depth + 1))) - else: - out.append("\n%s%s[%d]:\n%s%s%s" % (indent, - (' ' * (_depth)), - i, - indent, - (' ' * (_depth + 1)), - _ustr(vv))) - - return "".join(out) - - def pprint(self, *args, **kwargs): - """ - Pretty-printer for parsed results as a list, using the - `pprint <https://docs.python.org/3/library/pprint.html>`_ module. - Accepts additional positional or keyword args as defined for - `pprint.pprint <https://docs.python.org/3/library/pprint.html#pprint.pprint>`_ . - - Example:: - - ident = Word(alphas, alphanums) - num = Word(nums) - func = Forward() - term = ident | num | Group('(' + func + ')') - func <<= ident + Group(Optional(delimitedList(term))) - result = func.parseString("fna a,b,(fnb c,d,200),100") - result.pprint(width=40) - - prints:: - - ['fna', - ['a', - 'b', - ['(', 'fnb', ['c', 'd', '200'], ')'], - '100']] - """ - pprint.pprint(self.asList(), *args, **kwargs) - - # add support for pickle protocol - def __getstate__(self): - return (self.__toklist, - (self.__tokdict.copy(), - self.__parent is not None and self.__parent() or None, - self.__accumNames, - self.__name)) - - def __setstate__(self, state): - self.__toklist = state[0] - self.__tokdict, par, inAccumNames, self.__name = state[1] - self.__accumNames = {} - self.__accumNames.update(inAccumNames) - if par is not None: - self.__parent = wkref(par) - else: - self.__parent = None - - def __getnewargs__(self): - return self.__toklist, self.__name, self.__asList, self.__modal - - def __dir__(self): - return dir(type(self)) + list(self.keys()) - - @classmethod - def from_dict(cls, other, name=None): - """ - Helper classmethod to construct a ParseResults from a dict, preserving the - name-value relations as results names. If an optional 'name' argument is - given, a nested ParseResults will be returned - """ - def is_iterable(obj): - try: - iter(obj) - except Exception: - return False - else: - if PY_3: - return not isinstance(obj, (str, bytes)) - else: - return not isinstance(obj, basestring) - - ret = cls([]) - for k, v in other.items(): - if isinstance(v, Mapping): - ret += cls.from_dict(v, name=k) - else: - ret += cls([v], name=k, asList=is_iterable(v)) - if name is not None: - ret = cls([ret], name=name) - return ret - -MutableMapping.register(ParseResults) - -def col (loc, strg): - """Returns current column within a string, counting newlines as line separators. - The first column is number 1. 
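A one-line sanity check of this helper, together with the lineno and line helpers defined just below (same package assumption):

    from pyparsing import col, lineno, line

    s = "abc\ndef"
    print(lineno(5, s), col(5, s), line(5, s))   # loc 5 is the 'e' in 'def' -> 2 2 def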
- - Note: the default parsing behavior is to expand tabs in the input string - before starting the parsing process. See - :class:`ParserElement.parseString` for more - information on parsing strings containing ``<TAB>`` s, and suggested - methods to maintain a consistent view of the parsed string, the parse - location, and line and column positions within the parsed string. - """ - s = strg - return 1 if 0 < loc < len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc) - -def lineno(loc, strg): - """Returns current line number within a string, counting newlines as line separators. - The first line is number 1. - - Note - the default parsing behavior is to expand tabs in the input string - before starting the parsing process. See :class:`ParserElement.parseString` - for more information on parsing strings containing ``<TAB>`` s, and - suggested methods to maintain a consistent view of the parsed string, the - parse location, and line and column positions within the parsed string. - """ - return strg.count("\n", 0, loc) + 1 - -def line(loc, strg): - """Returns the line of text containing loc within a string, counting newlines as line separators. - """ - lastCR = strg.rfind("\n", 0, loc) - nextCR = strg.find("\n", loc) - if nextCR >= 0: - return strg[lastCR + 1:nextCR] - else: - return strg[lastCR + 1:] - -def _defaultStartDebugAction(instring, loc, expr): - print(("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % (lineno(loc, instring), col(loc, instring)))) - -def _defaultSuccessDebugAction(instring, startloc, endloc, expr, toks): - print("Matched " + _ustr(expr) + " -> " + str(toks.asList())) - -def _defaultExceptionDebugAction(instring, loc, expr, exc): - print("Exception raised:" + _ustr(exc)) - -def nullDebugAction(*args): - """'Do-nothing' debug action, to suppress debugging output during parsing.""" - pass - -# Only works on Python 3.x - nonlocal is toxic to Python 2 installs -#~ 'decorator to trim function calls to match the arity of the target' -#~ def _trim_arity(func, maxargs=3): - #~ if func in singleArgBuiltins: - #~ return lambda s,l,t: func(t) - #~ limit = 0 - #~ foundArity = False - #~ def wrapper(*args): - #~ nonlocal limit,foundArity - #~ while 1: - #~ try: - #~ ret = func(*args[limit:]) - #~ foundArity = True - #~ return ret - #~ except TypeError: - #~ if limit == maxargs or foundArity: - #~ raise - #~ limit += 1 - #~ continue - #~ return wrapper - -# this version is Python 2.x-3.x cross-compatible -'decorator to trim function calls to match the arity of the target' -def _trim_arity(func, maxargs=2): - if func in singleArgBuiltins: - return lambda s, l, t: func(t) - limit = [0] - foundArity = [False] - - # traceback return data structure changed in Py3.5 - normalize back to plain tuples - if system_version[:2] >= (3, 5): - def extract_stack(limit=0): - # special handling for Python 3.5.0 - extra deep call stack by 1 - offset = -3 if system_version == (3, 5, 0) else -2 - frame_summary = traceback.extract_stack(limit=-offset + limit - 1)[offset] - return [frame_summary[:2]] - def extract_tb(tb, limit=0): - frames = traceback.extract_tb(tb, limit=limit) - frame_summary = frames[-1] - return [frame_summary[:2]] - else: - extract_stack = traceback.extract_stack - extract_tb = traceback.extract_tb - - # synthesize what would be returned by traceback.extract_stack at the call to - # user's parse action 'func', so that we don't incur call penalty at parse time - - LINE_DIFF = 6 - # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND 
- # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!! - this_line = extract_stack(limit=2)[-1] - pa_call_line_synth = (this_line[0], this_line[1] + LINE_DIFF) - - def wrapper(*args): - while 1: - try: - ret = func(*args[limit[0]:]) - foundArity[0] = True - return ret - except TypeError: - # re-raise TypeErrors if they did not come from our arity testing - if foundArity[0]: - raise - else: - try: - tb = sys.exc_info()[-1] - if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth: - raise - finally: - try: - del tb - except NameError: - pass - - if limit[0] <= maxargs: - limit[0] += 1 - continue - raise - - # copy func name to wrapper for sensible debug output - func_name = "<parse action>" - try: - func_name = getattr(func, '__name__', - getattr(func, '__class__').__name__) - except Exception: - func_name = str(func) - wrapper.__name__ = func_name - - return wrapper - - -class ParserElement(object): - """Abstract base level parser element class.""" - DEFAULT_WHITE_CHARS = " \n\t\r" - verbose_stacktrace = False - - @staticmethod - def setDefaultWhitespaceChars(chars): - r""" - Overrides the default whitespace chars - - Example:: - - # default whitespace chars are space, <TAB> and newline - OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl'] - - # change to just treat newline as significant - ParserElement.setDefaultWhitespaceChars(" \t") - OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def'] - """ - ParserElement.DEFAULT_WHITE_CHARS = chars - - @staticmethod - def inlineLiteralsUsing(cls): - """ - Set class to be used for inclusion of string literals into a parser. - - Example:: - - # default literal class used is Literal - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31'] - - - # change to Suppress - ParserElement.inlineLiteralsUsing(Suppress) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - date_str.parseString("1999/12/31") # -> ['1999', '12', '31'] - """ - ParserElement._literalStringClass = cls - - @classmethod - def _trim_traceback(cls, tb): - while tb.tb_next: - tb = tb.tb_next - return tb - - def __init__(self, savelist=False): - self.parseAction = list() - self.failAction = None - # ~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall - self.strRepr = None - self.resultsName = None - self.saveAsList = savelist - self.skipWhitespace = True - self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS) - self.copyDefaultWhiteChars = True - self.mayReturnEmpty = False # used when checking for left-recursion - self.keepTabs = False - self.ignoreExprs = list() - self.debug = False - self.streamlined = False - self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index - self.errmsg = "" - self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all) - self.debugActions = (None, None, None) # custom debug actions - self.re = None - self.callPreparse = True # used to avoid redundant calls to preParse - self.callDuringTry = False - - def copy(self): - """ - Make a copy of this :class:`ParserElement`. Useful for defining - different parse actions for the same parsing pattern, using copies of - the original parse element. 
- - Example:: - - integer = Word(nums).setParseAction(lambda toks: int(toks[0])) - integerK = integer.copy().addParseAction(lambda toks: toks[0] * 1024) + Suppress("K") - integerM = integer.copy().addParseAction(lambda toks: toks[0] * 1024 * 1024) + Suppress("M") - - print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M")) - - prints:: - - [5120, 100, 655360, 268435456] - - Equivalent form of ``expr.copy()`` is just ``expr()``:: - - integerM = integer().addParseAction(lambda toks: toks[0] * 1024 * 1024) + Suppress("M") - """ - cpy = copy.copy(self) - cpy.parseAction = self.parseAction[:] - cpy.ignoreExprs = self.ignoreExprs[:] - if self.copyDefaultWhiteChars: - cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS - return cpy - - def setName(self, name): - """ - Define name for this expression, makes debugging and exception messages clearer. - - Example:: - - Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1) - Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1) - """ - self.name = name - self.errmsg = "Expected " + self.name - if __diag__.enable_debug_on_named_expressions: - self.setDebug() - return self - - def setResultsName(self, name, listAllMatches=False): - """ - Define name for referencing matching tokens as a nested attribute - of the returned parse results. - NOTE: this returns a *copy* of the original :class:`ParserElement` object; - this is so that the client can define a basic element, such as an - integer, and reference it in multiple places with different names. - - You can also set results names using the abbreviated syntax, - ``expr("name")`` in place of ``expr.setResultsName("name")`` - - see :class:`__call__`. - - Example:: - - date_str = (integer.setResultsName("year") + '/' - + integer.setResultsName("month") + '/' - + integer.setResultsName("day")) - - # equivalent form: - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - """ - return self._setResultsName(name, listAllMatches) - - def _setResultsName(self, name, listAllMatches=False): - newself = self.copy() - if name.endswith("*"): - name = name[:-1] - listAllMatches = True - newself.resultsName = name - newself.modalResults = not listAllMatches - return newself - - def setBreak(self, breakFlag=True): - """Method to invoke the Python pdb debugger when this element is - about to be parsed. Set ``breakFlag`` to True to enable, False to - disable. - """ - if breakFlag: - _parseMethod = self._parse - def breaker(instring, loc, doActions=True, callPreParse=True): - import pdb - # this call to pdb.set_trace() is intentional, not a checkin error - pdb.set_trace() - return _parseMethod(instring, loc, doActions, callPreParse) - breaker._originalParseMethod = _parseMethod - self._parse = breaker - else: - if hasattr(self._parse, "_originalParseMethod"): - self._parse = self._parse._originalParseMethod - return self - - def setParseAction(self, *fns, **kwargs): - """ - Define one or more actions to perform when successfully matching parse element definition. 
- Parse action fn is a callable method with 0-3 arguments, called as ``fn(s, loc, toks)`` , - ``fn(loc, toks)`` , ``fn(toks)`` , or just ``fn()`` , where: - - - s = the original string being parsed (see note below) - - loc = the location of the matching substring - - toks = a list of the matched tokens, packaged as a :class:`ParseResults` object - - If the functions in fns modify the tokens, they can return them as the return - value from fn, and the modified list of tokens will replace the original. - Otherwise, fn does not need to return any value. - - If None is passed as the parse action, all previously added parse actions for this - expression are cleared. - - Optional keyword arguments: - - callDuringTry = (default= ``False``) indicate if parse action should be run during lookaheads and alternate testing - - Note: the default parsing behavior is to expand tabs in the input string - before starting the parsing process. See :class:`parseString` for more - information on parsing strings containing ``<TAB>`` s, and suggested - methods to maintain a consistent view of the parsed string, the parse - location, and line and column positions within the parsed string. - - Example:: - - integer = Word(nums) - date_str = integer + '/' + integer + '/' + integer - - date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31'] - - # use parse action to convert to ints at parse time - integer = Word(nums).setParseAction(lambda toks: int(toks[0])) - date_str = integer + '/' + integer + '/' + integer - - # note that integer fields are now ints, not strings - date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31] - """ - if list(fns) == [None,]: - self.parseAction = [] - else: - if not all(callable(fn) for fn in fns): - raise TypeError("parse actions must be callable") - self.parseAction = list(map(_trim_arity, list(fns))) - self.callDuringTry = kwargs.get("callDuringTry", False) - return self - - def addParseAction(self, *fns, **kwargs): - """ - Add one or more parse actions to expression's list of parse actions. See :class:`setParseAction`. - - See examples in :class:`copy`. - """ - self.parseAction += list(map(_trim_arity, list(fns))) - self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False) - return self - - def addCondition(self, *fns, **kwargs): - """Add a boolean predicate function to expression's list of parse actions. See - :class:`setParseAction` for function call signatures. Unlike ``setParseAction``, - functions passed to ``addCondition`` need to return boolean success/fail of the condition. - - Optional keyword arguments: - - message = define a custom message to be used in the raised exception - - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException - - Example:: - - integer = Word(nums).setParseAction(lambda toks: int(toks[0])) - year_int = integer.copy() - year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later") - date_str = year_int + '/' + integer + '/' + integer - - result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1) - """ - for fn in fns: - self.parseAction.append(conditionAsParseAction(fn, message=kwargs.get('message'), - fatal=kwargs.get('fatal', False))) - - self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False) - return self - - def setFailAction(self, fn): - """Define action to perform if parsing fails at this expression.
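For instance, a minimal sketch of a fail action (the fail action's signature is spelled out next; the helper name note_failure is illustrative only, same package assumption):

    from pyparsing import Keyword, Word, alphas, ParseException

    def note_failure(s, loc, expr, err):
        print("no %s at char %d" % (expr, loc))

    stmt = Word(alphas) + Keyword("end").setFailAction(note_failure)
    try:
        stmt.parseString("begin stop")
    except ParseException:
        pass   # the fail action above has already reported the failure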
- Fail acton fn is a callable function that takes the arguments - ``fn(s, loc, expr, err)`` where: - - s = string being parsed - - loc = location where expression match was attempted and failed - - expr = the parse expression that failed - - err = the exception thrown - The function returns no value. It may throw :class:`ParseFatalException` - if it is desired to stop parsing immediately.""" - self.failAction = fn - return self - - def _skipIgnorables(self, instring, loc): - exprsFound = True - while exprsFound: - exprsFound = False - for e in self.ignoreExprs: - try: - while 1: - loc, dummy = e._parse(instring, loc) - exprsFound = True - except ParseException: - pass - return loc - - def preParse(self, instring, loc): - if self.ignoreExprs: - loc = self._skipIgnorables(instring, loc) - - if self.skipWhitespace: - wt = self.whiteChars - instrlen = len(instring) - while loc < instrlen and instring[loc] in wt: - loc += 1 - - return loc - - def parseImpl(self, instring, loc, doActions=True): - return loc, [] - - def postParse(self, instring, loc, tokenlist): - return tokenlist - - # ~ @profile - def _parseNoCache(self, instring, loc, doActions=True, callPreParse=True): - TRY, MATCH, FAIL = 0, 1, 2 - debugging = (self.debug) # and doActions) - - if debugging or self.failAction: - # ~ print ("Match", self, "at loc", loc, "(%d, %d)" % (lineno(loc, instring), col(loc, instring))) - if self.debugActions[TRY]: - self.debugActions[TRY](instring, loc, self) - try: - if callPreParse and self.callPreparse: - preloc = self.preParse(instring, loc) - else: - preloc = loc - tokensStart = preloc - if self.mayIndexError or preloc >= len(instring): - try: - loc, tokens = self.parseImpl(instring, preloc, doActions) - except IndexError: - raise ParseException(instring, len(instring), self.errmsg, self) - else: - loc, tokens = self.parseImpl(instring, preloc, doActions) - except Exception as err: - # ~ print ("Exception raised:", err) - if self.debugActions[FAIL]: - self.debugActions[FAIL](instring, tokensStart, self, err) - if self.failAction: - self.failAction(instring, tokensStart, self, err) - raise - else: - if callPreParse and self.callPreparse: - preloc = self.preParse(instring, loc) - else: - preloc = loc - tokensStart = preloc - if self.mayIndexError or preloc >= len(instring): - try: - loc, tokens = self.parseImpl(instring, preloc, doActions) - except IndexError: - raise ParseException(instring, len(instring), self.errmsg, self) - else: - loc, tokens = self.parseImpl(instring, preloc, doActions) - - tokens = self.postParse(instring, loc, tokens) - - retTokens = ParseResults(tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults) - if self.parseAction and (doActions or self.callDuringTry): - if debugging: - try: - for fn in self.parseAction: - try: - tokens = fn(instring, tokensStart, retTokens) - except IndexError as parse_action_exc: - exc = ParseException("exception raised in parse action") - exc.__cause__ = parse_action_exc - raise exc - - if tokens is not None and tokens is not retTokens: - retTokens = ParseResults(tokens, - self.resultsName, - asList=self.saveAsList and isinstance(tokens, (ParseResults, list)), - modal=self.modalResults) - except Exception as err: - # ~ print "Exception raised in user parse action:", err - if self.debugActions[FAIL]: - self.debugActions[FAIL](instring, tokensStart, self, err) - raise - else: - for fn in self.parseAction: - try: - tokens = fn(instring, tokensStart, retTokens) - except IndexError as parse_action_exc: - exc = 
ParseException("exception raised in parse action") - exc.__cause__ = parse_action_exc - raise exc - - if tokens is not None and tokens is not retTokens: - retTokens = ParseResults(tokens, - self.resultsName, - asList=self.saveAsList and isinstance(tokens, (ParseResults, list)), - modal=self.modalResults) - if debugging: - # ~ print ("Matched", self, "->", retTokens.asList()) - if self.debugActions[MATCH]: - self.debugActions[MATCH](instring, tokensStart, loc, self, retTokens) - - return loc, retTokens - - def tryParse(self, instring, loc): - try: - return self._parse(instring, loc, doActions=False)[0] - except ParseFatalException: - raise ParseException(instring, loc, self.errmsg, self) - - def canParseNext(self, instring, loc): - try: - self.tryParse(instring, loc) - except (ParseException, IndexError): - return False - else: - return True - - class _UnboundedCache(object): - def __init__(self): - cache = {} - self.not_in_cache = not_in_cache = object() - - def get(self, key): - return cache.get(key, not_in_cache) - - def set(self, key, value): - cache[key] = value - - def clear(self): - cache.clear() - - def cache_len(self): - return len(cache) - - self.get = types.MethodType(get, self) - self.set = types.MethodType(set, self) - self.clear = types.MethodType(clear, self) - self.__len__ = types.MethodType(cache_len, self) - - if _OrderedDict is not None: - class _FifoCache(object): - def __init__(self, size): - self.not_in_cache = not_in_cache = object() - - cache = _OrderedDict() - - def get(self, key): - return cache.get(key, not_in_cache) - - def set(self, key, value): - cache[key] = value - while len(cache) > size: - try: - cache.popitem(False) - except KeyError: - pass - - def clear(self): - cache.clear() - - def cache_len(self): - return len(cache) - - self.get = types.MethodType(get, self) - self.set = types.MethodType(set, self) - self.clear = types.MethodType(clear, self) - self.__len__ = types.MethodType(cache_len, self) - - else: - class _FifoCache(object): - def __init__(self, size): - self.not_in_cache = not_in_cache = object() - - cache = {} - key_fifo = collections.deque([], size) - - def get(self, key): - return cache.get(key, not_in_cache) - - def set(self, key, value): - cache[key] = value - while len(key_fifo) > size: - cache.pop(key_fifo.popleft(), None) - key_fifo.append(key) - - def clear(self): - cache.clear() - key_fifo.clear() - - def cache_len(self): - return len(cache) - - self.get = types.MethodType(get, self) - self.set = types.MethodType(set, self) - self.clear = types.MethodType(clear, self) - self.__len__ = types.MethodType(cache_len, self) - - # argument cache for optimizing repeated calls when backtracking through recursive expressions - packrat_cache = {} # this is set later by enabledPackrat(); this is here so that resetCache() doesn't fail - packrat_cache_lock = RLock() - packrat_cache_stats = [0, 0] - - # this method gets repeatedly called during backtracking with the same arguments - - # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression - def _parseCache(self, instring, loc, doActions=True, callPreParse=True): - HIT, MISS = 0, 1 - lookup = (self, instring, loc, callPreParse, doActions) - with ParserElement.packrat_cache_lock: - cache = ParserElement.packrat_cache - value = cache.get(lookup) - if value is cache.not_in_cache: - ParserElement.packrat_cache_stats[MISS] += 1 - try: - value = self._parseNoCache(instring, loc, doActions, callPreParse) - except ParseBaseException as pe: - # cache a copy of the 
exception, without the traceback - cache.set(lookup, pe.__class__(*pe.args)) - raise - else: - cache.set(lookup, (value[0], value[1].copy())) - return value - else: - ParserElement.packrat_cache_stats[HIT] += 1 - if isinstance(value, Exception): - raise value - return value[0], value[1].copy() - - _parse = _parseNoCache - - @staticmethod - def resetCache(): - ParserElement.packrat_cache.clear() - ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats) - - _packratEnabled = False - @staticmethod - def enablePackrat(cache_size_limit=128): - """Enables "packrat" parsing, which adds memoizing to the parsing logic. - Repeated parse attempts at the same string location (which happens - often in many complex grammars) can immediately return a cached value, - instead of re-executing parsing/validating code. Memoizing is done of - both valid results and parsing exceptions. - - Parameters: - - - cache_size_limit - (default= ``128``) - if an integer value is provided - will limit the size of the packrat cache; if None is passed, then - the cache size will be unbounded; if 0 is passed, the cache will - be effectively disabled. - - This speedup may break existing programs that use parse actions that - have side-effects. For this reason, packrat parsing is disabled when - you first import pyparsing. To activate the packrat feature, your - program must call the class method :class:`ParserElement.enablePackrat`. - For best results, call ``enablePackrat()`` immediately after - importing pyparsing. - - Example:: - - from pip._vendor import pyparsing - pyparsing.ParserElement.enablePackrat() - """ - if not ParserElement._packratEnabled: - ParserElement._packratEnabled = True - if cache_size_limit is None: - ParserElement.packrat_cache = ParserElement._UnboundedCache() - else: - ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit) - ParserElement._parse = ParserElement._parseCache - - def parseString(self, instring, parseAll=False): - """ - Execute the parse expression with the given string. - This is the main interface to the client code, once the complete - expression has been built. - - Returns the parsed data as a :class:`ParseResults` object, which may be - accessed as a list, or as a dict or object with attributes if the given parser - includes results names. - - If you want the grammar to require that the entire input string be - successfully parsed, then set ``parseAll`` to True (equivalent to ending - the grammar with ``StringEnd()``). - - Note: ``parseString`` implicitly calls ``expandtabs()`` on the input string, - in order to report proper column numbers in parse actions. 
- If the input string contains tabs and - the grammar uses parse actions that use the ``loc`` argument to index into the - string being parsed, you can ensure you have a consistent view of the input - string by: - - - calling ``parseWithTabs`` on your grammar before calling ``parseString`` - (see :class:`parseWithTabs`) - - define your parse action using the full ``(s, loc, toks)`` signature, and - reference the input string using the parse action's ``s`` argument - - explictly expand the tabs in your input string before calling - ``parseString`` - - Example:: - - Word('a').parseString('aaaaabaaa') # -> ['aaaaa'] - Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text - """ - ParserElement.resetCache() - if not self.streamlined: - self.streamline() - # ~ self.saveAsList = True - for e in self.ignoreExprs: - e.streamline() - if not self.keepTabs: - instring = instring.expandtabs() - try: - loc, tokens = self._parse(instring, 0) - if parseAll: - loc = self.preParse(instring, loc) - se = Empty() + StringEnd() - se._parse(instring, loc) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clearing out pyparsing internal stack trace - if getattr(exc, '__traceback__', None) is not None: - exc.__traceback__ = self._trim_traceback(exc.__traceback__) - raise exc - else: - return tokens - - def scanString(self, instring, maxMatches=_MAX_INT, overlap=False): - """ - Scan the input string for expression matches. Each match will return the - matching tokens, start location, and end location. May be called with optional - ``maxMatches`` argument, to clip scanning after 'n' matches are found. If - ``overlap`` is specified, then overlapping matches will be reported. - - Note that the start and end locations are reported relative to the string - being parsed. See :class:`parseString` for more information on parsing - strings with embedded tabs. - - Example:: - - source = "sldjf123lsdjjkf345sldkjf879lkjsfd987" - print(source) - for tokens, start, end in Word(alphas).scanString(source): - print(' '*start + '^'*(end-start)) - print(' '*start + tokens[0]) - - prints:: - - sldjf123lsdjjkf345sldkjf879lkjsfd987 - ^^^^^ - sldjf - ^^^^^^^ - lsdjjkf - ^^^^^^ - sldkjf - ^^^^^^ - lkjsfd - """ - if not self.streamlined: - self.streamline() - for e in self.ignoreExprs: - e.streamline() - - if not self.keepTabs: - instring = _ustr(instring).expandtabs() - instrlen = len(instring) - loc = 0 - preparseFn = self.preParse - parseFn = self._parse - ParserElement.resetCache() - matches = 0 - try: - while loc <= instrlen and matches < maxMatches: - try: - preloc = preparseFn(instring, loc) - nextLoc, tokens = parseFn(instring, preloc, callPreParse=False) - except ParseException: - loc = preloc + 1 - else: - if nextLoc > loc: - matches += 1 - yield tokens, preloc, nextLoc - if overlap: - nextloc = preparseFn(instring, loc) - if nextloc > loc: - loc = nextLoc - else: - loc += 1 - else: - loc = nextLoc - else: - loc = preloc + 1 - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clearing out pyparsing internal stack trace - if getattr(exc, '__traceback__', None) is not None: - exc.__traceback__ = self._trim_traceback(exc.__traceback__) - raise exc - - def transformString(self, instring): - """ - Extension to :class:`scanString`, to modify matching text with modified tokens that may - be returned from a parse action. 
To use ``transformString``, define a grammar and - attach a parse action to it that modifies the returned token list. - Invoking ``transformString()`` on a target string will then scan for matches, - and replace the matched text patterns according to the logic in the parse - action. ``transformString()`` returns the resulting transformed string. - - Example:: - - wd = Word(alphas) - wd.setParseAction(lambda toks: toks[0].title()) - - print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york.")) - - prints:: - - Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York. - """ - out = [] - lastE = 0 - # force preservation of <TAB>s, to minimize unwanted transformation of string, and to - # keep string locs straight between transformString and scanString - self.keepTabs = True - try: - for t, s, e in self.scanString(instring): - out.append(instring[lastE:s]) - if t: - if isinstance(t, ParseResults): - out += t.asList() - elif isinstance(t, list): - out += t - else: - out.append(t) - lastE = e - out.append(instring[lastE:]) - out = [o for o in out if o] - return "".join(map(_ustr, _flatten(out))) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clearing out pyparsing internal stack trace - if getattr(exc, '__traceback__', None) is not None: - exc.__traceback__ = self._trim_traceback(exc.__traceback__) - raise exc - - def searchString(self, instring, maxMatches=_MAX_INT): - """ - Another extension to :class:`scanString`, simplifying the access to the tokens found - to match the given parse expression. May be called with optional - ``maxMatches`` argument, to clip searching after 'n' matches are found. - - Example:: - - # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters - cap_word = Word(alphas.upper(), alphas.lower()) - - print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")) - - # the sum() builtin can be used to merge results into a single ParseResults object - print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))) - - prints:: - - [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']] - ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity'] - """ - try: - return ParseResults([t for t, s, e in self.scanString(instring, maxMatches)]) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clearing out pyparsing internal stack trace - if getattr(exc, '__traceback__', None) is not None: - exc.__traceback__ = self._trim_traceback(exc.__traceback__) - raise exc - - def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False): - """ - Generator method to split a string using the given expression as a separator. - May be called with optional ``maxsplit`` argument, to limit the number of splits; - and the optional ``includeSeparators`` argument (default= ``False``), if the separating - matching text should be included in the split results. 
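For instance, ``includeSeparators`` keeps the matched separators in the output (same package assumption):

    from pyparsing import oneOf

    punc = oneOf(list(".,;:"))
    print(list(punc.split("a,b;c", includeSeparators=True)))   # -> ['a', ',', 'b', ';', 'c']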
- - Example:: - - punc = oneOf(list(".,;:/-!?")) - print(list(punc.split("This, this?, this sentence, is badly punctuated!"))) - - prints:: - - ['This', ' this', '', ' this sentence', ' is badly punctuated', ''] - """ - splits = 0 - last = 0 - for t, s, e in self.scanString(instring, maxMatches=maxsplit): - yield instring[last:s] - if includeSeparators: - yield t[0] - last = e - yield instring[last:] - - def __add__(self, other): - """ - Implementation of + operator - returns :class:`And`. Adding strings to a ParserElement - converts them to :class:`Literal`s by default. - - Example:: - - greet = Word(alphas) + "," + Word(alphas) + "!" - hello = "Hello, World!" - print (hello, "->", greet.parseString(hello)) - - prints:: - - Hello, World! -> ['Hello', ',', 'World', '!'] - - ``...`` may be used as a parse expression as a short form of :class:`SkipTo`. - - Literal('start') + ... + Literal('end') - - is equivalent to: - - Literal('start') + SkipTo('end')("_skipped*") + Literal('end') - - Note that the skipped text is returned with '_skipped' as a results name, - and to support having multiple skips in the same parser, the value returned is - a list of all skipped text. - """ - if other is Ellipsis: - return _PendingSkip(self) - - if isinstance(other, basestring): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return And([self, other]) - - def __radd__(self, other): - """ - Implementation of + operator when left operand is not a :class:`ParserElement` - """ - if other is Ellipsis: - return SkipTo(self)("_skipped*") + self - - if isinstance(other, basestring): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other + self - - def __sub__(self, other): - """ - Implementation of - operator, returns :class:`And` with error stop - """ - if isinstance(other, basestring): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return self + And._ErrorStop() + other - - def __rsub__(self, other): - """ - Implementation of - operator when left operand is not a :class:`ParserElement` - """ - if isinstance(other, basestring): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other - self - - def __mul__(self, other): - """ - Implementation of * operator, allows use of ``expr * 3`` in place of - ``expr + expr + expr``. Expressions may also be multiplied by a 2-integer - tuple, similar to ``{min, max}`` multipliers in regular expressions.
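For instance (same package assumption; the tuple forms are enumerated next):

    from pyparsing import Word, nums

    token = Word(nums)
    print((token * 3).parseString("1 2 3"))        # exactly three -> ['1', '2', '3']
    print((token * (2, 4)).parseString("1 2 3"))   # between two and four -> ['1', '2', '3']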
Tuples - may also include ``None`` as in: - - ``expr*(n, None)`` or ``expr*(n, )`` is equivalent - to ``expr*n + ZeroOrMore(expr)`` - (read as "at least n instances of ``expr``") - - ``expr*(None, n)`` is equivalent to ``expr*(0, n)`` - (read as "0 to n instances of ``expr``") - - ``expr*(None, None)`` is equivalent to ``ZeroOrMore(expr)`` - - ``expr*(1, None)`` is equivalent to ``OneOrMore(expr)`` - - Note that ``expr*(None, n)`` does not raise an exception if - more than n exprs exist in the input stream; that is, - ``expr*(None, n)`` does not enforce a maximum number of expr - occurrences. If this behavior is desired, then write - ``expr*(None, n) + ~expr`` - """ - if other is Ellipsis: - other = (0, None) - elif isinstance(other, tuple) and other[:1] == (Ellipsis,): - other = ((0, ) + other[1:] + (None,))[:2] - - if isinstance(other, int): - minElements, optElements = other, 0 - elif isinstance(other, tuple): - other = tuple(o if o is not Ellipsis else None for o in other) - other = (other + (None, None))[:2] - if other[0] is None: - other = (0, other[1]) - if isinstance(other[0], int) and other[1] is None: - if other[0] == 0: - return ZeroOrMore(self) - if other[0] == 1: - return OneOrMore(self) - else: - return self * other[0] + ZeroOrMore(self) - elif isinstance(other[0], int) and isinstance(other[1], int): - minElements, optElements = other - optElements -= minElements - else: - raise TypeError("cannot multiply 'ParserElement' and ('%s', '%s') objects", type(other[0]), type(other[1])) - else: - raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other)) - - if minElements < 0: - raise ValueError("cannot multiply ParserElement by negative value") - if optElements < 0: - raise ValueError("second tuple value must be greater or equal to first tuple value") - if minElements == optElements == 0: - raise ValueError("cannot multiply ParserElement by 0 or (0, 0)") - - if optElements: - def makeOptionalList(n): - if n > 1: - return Optional(self + makeOptionalList(n - 1)) - else: - return Optional(self) - if minElements: - if minElements == 1: - ret = self + makeOptionalList(optElements) - else: - ret = And([self] * minElements) + makeOptionalList(optElements) - else: - ret = makeOptionalList(optElements) - else: - if minElements == 1: - ret = self - else: - ret = And([self] * minElements) - return ret - - def __rmul__(self, other): - return self.__mul__(other) - - def __or__(self, other): - """ - Implementation of | operator - returns :class:`MatchFirst` - """ - if other is Ellipsis: - return _PendingSkip(self, must_skip=True) - - if isinstance(other, basestring): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return MatchFirst([self, other]) - - def __ror__(self, other): - """ - Implementation of | operator when left operand is not a :class:`ParserElement` - """ - if isinstance(other, basestring): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other | self - - def __xor__(self, other): - """ - Implementation of ^ operator - returns :class:`Or` - """ - if isinstance(other, basestring): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - warnings.warn("Cannot combine element of type %s with 
ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return Or([self, other]) - - def __rxor__(self, other): - """ - Implementation of ^ operator when left operand is not a :class:`ParserElement` - """ - if isinstance(other, basestring): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other ^ self - - def __and__(self, other): - """ - Implementation of & operator - returns :class:`Each` - """ - if isinstance(other, basestring): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return Each([self, other]) - - def __rand__(self, other): - """ - Implementation of & operator when left operand is not a :class:`ParserElement` - """ - if isinstance(other, basestring): - other = self._literalStringClass(other) - if not isinstance(other, ParserElement): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other & self - - def __invert__(self): - """ - Implementation of ~ operator - returns :class:`NotAny` - """ - return NotAny(self) - - def __iter__(self): - # must implement __iter__ to override legacy use of sequential access to __getitem__ to - # iterate over a sequence - raise TypeError('%r object is not iterable' % self.__class__.__name__) - - def __getitem__(self, key): - """ - use ``[]`` indexing notation as a short form for expression repetition: - - ``expr[n]`` is equivalent to ``expr*n`` - - ``expr[m, n]`` is equivalent to ``expr*(m, n)`` - - ``expr[n, ...]`` or ``expr[n,]`` is equivalent - to ``expr*n + ZeroOrMore(expr)`` - (read as "at least n instances of ``expr``") - - ``expr[..., n]`` is equivalent to ``expr*(0, n)`` - (read as "0 to n instances of ``expr``") - - ``expr[...]`` and ``expr[0, ...]`` are equivalent to ``ZeroOrMore(expr)`` - - ``expr[1, ...]`` is equivalent to ``OneOrMore(expr)`` - ``None`` may be used in place of ``...``. - - Note that ``expr[..., n]`` and ``expr[m, n]``do not raise an exception - if more than ``n`` ``expr``s exist in the input stream. If this behavior is - desired, then write ``expr[..., n] + ~expr``. - """ - - # convert single arg keys to tuples - try: - if isinstance(key, str): - key = (key,) - iter(key) - except TypeError: - key = (key, key) - - if len(key) > 2: - warnings.warn("only 1 or 2 index arguments supported ({0}{1})".format(key[:5], - '... [{0}]'.format(len(key)) - if len(key) > 5 else '')) - - # clip to 2 elements - ret = self * tuple(key[:2]) - return ret - - def __call__(self, name=None): - """ - Shortcut for :class:`setResultsName`, with ``listAllMatches=False``. - - If ``name`` is given with a trailing ``'*'`` character, then ``listAllMatches`` will be - passed as ``True``. - - If ``name` is omitted, same as calling :class:`copy`. - - Example:: - - # these are equivalent - userdata = Word(alphas).setResultsName("name") + Word(nums + "-").setResultsName("socsecno") - userdata = Word(alphas)("name") + Word(nums + "-")("socsecno") - """ - if name is not None: - return self._setResultsName(name) - else: - return self.copy() - - def suppress(self): - """ - Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from - cluttering up returned output. 
- """ - return Suppress(self) - - def leaveWhitespace(self): - """ - Disables the skipping of whitespace before matching the characters in the - :class:`ParserElement`'s defined pattern. This is normally only used internally by - the pyparsing module, but may be needed in some whitespace-sensitive grammars. - """ - self.skipWhitespace = False - return self - - def setWhitespaceChars(self, chars): - """ - Overrides the default whitespace chars - """ - self.skipWhitespace = True - self.whiteChars = chars - self.copyDefaultWhiteChars = False - return self - - def parseWithTabs(self): - """ - Overrides default behavior to expand ``<TAB>``s to spaces before parsing the input string. - Must be called before ``parseString`` when the input grammar contains elements that - match ``<TAB>`` characters. - """ - self.keepTabs = True - return self - - def ignore(self, other): - """ - Define expression to be ignored (e.g., comments) while doing pattern - matching; may be called repeatedly, to define multiple comment or other - ignorable patterns. - - Example:: - - patt = OneOrMore(Word(alphas)) - patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj'] - - patt.ignore(cStyleComment) - patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd'] - """ - if isinstance(other, basestring): - other = Suppress(other) - - if isinstance(other, Suppress): - if other not in self.ignoreExprs: - self.ignoreExprs.append(other) - else: - self.ignoreExprs.append(Suppress(other.copy())) - return self - - def setDebugActions(self, startAction, successAction, exceptionAction): - """ - Enable display of debugging messages while doing pattern matching. - """ - self.debugActions = (startAction or _defaultStartDebugAction, - successAction or _defaultSuccessDebugAction, - exceptionAction or _defaultExceptionDebugAction) - self.debug = True - return self - - def setDebug(self, flag=True): - """ - Enable display of debugging messages while doing pattern matching. - Set ``flag`` to True to enable, False to disable. - - Example:: - - wd = Word(alphas).setName("alphaword") - integer = Word(nums).setName("numword") - term = wd | integer - - # turn on debugging for wd - wd.setDebug() - - OneOrMore(term).parseString("abc 123 xyz 890") - - prints:: - - Match alphaword at loc 0(1,1) - Matched alphaword -> ['abc'] - Match alphaword at loc 3(1,4) - Exception raised:Expected alphaword (at char 4), (line:1, col:5) - Match alphaword at loc 7(1,8) - Matched alphaword -> ['xyz'] - Match alphaword at loc 11(1,12) - Exception raised:Expected alphaword (at char 12), (line:1, col:13) - Match alphaword at loc 15(1,16) - Exception raised:Expected alphaword (at char 15), (line:1, col:16) - - The output shown is that produced by the default debug actions - custom debug actions can be - specified using :class:`setDebugActions`. Prior to attempting - to match the ``wd`` expression, the debugging message ``"Match <exprname> at loc <n>(<line>,<col>)"`` - is shown. Then if the parse succeeds, a ``"Matched"`` message is shown, or an ``"Exception raised"`` - message is shown. Also note the use of :class:`setName` to assign a human-readable name to the expression, - which makes debugging and exception messages easier to understand - for instance, the default - name created for the :class:`Word` expression without calling ``setName`` is ``"W:(ABCD...)"``. 
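A minimal sketch of custom debug actions via ``setDebugActions``; the handler signatures mirror the module's default debug actions, and the handler names are invented for illustration::

    from pyparsing import Word, alphas

    def on_try(instring, loc, expr):
        print("trying %s at loc %d" % (expr, loc))

    def on_match(instring, startloc, endloc, expr, toks):
        print("matched %s -> %s" % (expr, toks.asList()))

    def on_fail(instring, loc, expr, exc):
        print("failed %s: %s" % (expr, exc))

    wd = Word(alphas).setName("alphaword")
    wd.setDebugActions(on_try, on_match, on_fail)
    wd.parseString("abc")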
- """ - if flag: - self.setDebugActions(_defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction) - else: - self.debug = False - return self - - def __str__(self): - return self.name - - def __repr__(self): - return _ustr(self) - - def streamline(self): - self.streamlined = True - self.strRepr = None - return self - - def checkRecursion(self, parseElementList): - pass - - def validate(self, validateTrace=None): - """ - Check defined expressions for valid structure, check for infinite recursive definitions. - """ - self.checkRecursion([]) - - def parseFile(self, file_or_filename, parseAll=False): - """ - Execute the parse expression on the given file or filename. - If a filename is specified (instead of a file object), - the entire file is opened, read, and closed before parsing. - """ - try: - file_contents = file_or_filename.read() - except AttributeError: - with open(file_or_filename, "r") as f: - file_contents = f.read() - try: - return self.parseString(file_contents, parseAll) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clearing out pyparsing internal stack trace - if getattr(exc, '__traceback__', None) is not None: - exc.__traceback__ = self._trim_traceback(exc.__traceback__) - raise exc - - def __eq__(self, other): - if self is other: - return True - elif isinstance(other, basestring): - return self.matches(other) - elif isinstance(other, ParserElement): - return vars(self) == vars(other) - return False - - def __ne__(self, other): - return not (self == other) - - def __hash__(self): - return id(self) - - def __req__(self, other): - return self == other - - def __rne__(self, other): - return not (self == other) - - def matches(self, testString, parseAll=True): - """ - Method for quick testing of a parser against a test string. Good for simple - inline microtests of sub expressions while building up larger parser. - - Parameters: - - testString - to test against this expression for a match - - parseAll - (default= ``True``) - flag to pass to :class:`parseString` when running tests - - Example:: - - expr = Word(nums) - assert expr.matches("100") - """ - try: - self.parseString(_ustr(testString), parseAll=parseAll) - return True - except ParseBaseException: - return False - - def runTests(self, tests, parseAll=True, comment='#', - fullDump=True, printResults=True, failureTests=False, postParse=None, - file=None): - """ - Execute the parse expression on a series of test strings, showing each - test, the parsed results or where the parse failed. Quick and easy way to - run a parse expression against a list of sample strings. 
- - Parameters: - - tests - a list of separate test strings, or a multiline string of test strings - - parseAll - (default= ``True``) - flag to pass to :class:`parseString` when running tests - - comment - (default= ``'#'``) - expression for indicating embedded comments in the test - string; pass None to disable comment filtering - - fullDump - (default= ``True``) - dump results as list followed by results names in nested outline; - if False, only dump nested list - - printResults - (default= ``True``) prints test output to stdout - - failureTests - (default= ``False``) indicates if these tests are expected to fail parsing - - postParse - (default= ``None``) optional callback for successful parse results; called as - `fn(test_string, parse_results)` and returns a string to be added to the test output - - file - (default=``None``) optional file-like object to which test output will be written; - if None, will default to ``sys.stdout`` - - Returns: a (success, results) tuple, where success indicates that all tests succeeded - (or failed if ``failureTests`` is True), and the results contain a list of lines of each - test's output - - Example:: - - number_expr = pyparsing_common.number.copy() - - result = number_expr.runTests(''' - # unsigned integer - 100 - # negative integer - -100 - # float with scientific notation - 6.02e23 - # integer with scientific notation - 1e-12 - ''') - print("Success" if result[0] else "Failed!") - - result = number_expr.runTests(''' - # stray character - 100Z - # missing leading digit before '.' - -.100 - # too many '.' - 3.14.159 - ''', failureTests=True) - print("Success" if result[0] else "Failed!") - - prints:: - - # unsigned integer - 100 - [100] - - # negative integer - -100 - [-100] - - # float with scientific notation - 6.02e23 - [6.02e+23] - - # integer with scientific notation - 1e-12 - [1e-12] - - Success - - # stray character - 100Z - ^ - FAIL: Expected end of text (at char 3), (line:1, col:4) - - # missing leading digit before '.' - -.100 - ^ - FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1) - - # too many '.' - 3.14.159 - ^ - FAIL: Expected end of text (at char 4), (line:1, col:5) - - Success - - Each test string must be on a single line. If you want to test a string that spans multiple - lines, create a test like this:: - - expr.runTests(r"this is a test\\n of strings that spans \\n 3 lines") - - (Note that this is a raw string literal, you must include the leading 'r'.)
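A minimal sketch of the ``postParse`` callback described above (``annotate`` is a name invented for this example)::

    from pyparsing import pyparsing_common

    def annotate(test_string, result):
        # the string returned here is appended to that test's output
        return "value=%r type=%s" % (result[0], type(result[0]).__name__)

    pyparsing_common.number.runTests('''
        100
        6.02e23
        ''', postParse=annotate)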
- """ - if isinstance(tests, basestring): - tests = list(map(str.strip, tests.rstrip().splitlines())) - if isinstance(comment, basestring): - comment = Literal(comment) - if file is None: - file = sys.stdout - print_ = file.write - - allResults = [] - comments = [] - success = True - NL = Literal(r'\n').addParseAction(replaceWith('\n')).ignore(quotedString) - BOM = u'\ufeff' - for t in tests: - if comment is not None and comment.matches(t, False) or comments and not t: - comments.append(t) - continue - if not t: - continue - out = ['\n' + '\n'.join(comments) if comments else '', t] - comments = [] - try: - # convert newline marks to actual newlines, and strip leading BOM if present - t = NL.transformString(t.lstrip(BOM)) - result = self.parseString(t, parseAll=parseAll) - except ParseBaseException as pe: - fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else "" - if '\n' in t: - out.append(line(pe.loc, t)) - out.append(' ' * (col(pe.loc, t) - 1) + '^' + fatal) - else: - out.append(' ' * pe.loc + '^' + fatal) - out.append("FAIL: " + str(pe)) - success = success and failureTests - result = pe - except Exception as exc: - out.append("FAIL-EXCEPTION: " + str(exc)) - success = success and failureTests - result = exc - else: - success = success and not failureTests - if postParse is not None: - try: - pp_value = postParse(t, result) - if pp_value is not None: - if isinstance(pp_value, ParseResults): - out.append(pp_value.dump()) - else: - out.append(str(pp_value)) - else: - out.append(result.dump()) - except Exception as e: - out.append(result.dump(full=fullDump)) - out.append("{0} failed: {1}: {2}".format(postParse.__name__, type(e).__name__, e)) - else: - out.append(result.dump(full=fullDump)) - - if printResults: - if fullDump: - out.append('') - print_('\n'.join(out)) - - allResults.append((t, result)) - - return success, allResults - - -class _PendingSkip(ParserElement): - # internal placeholder class to hold a place were '...' is added to a parser element, - # once another ParserElement is added, this placeholder will be replaced with a SkipTo - def __init__(self, expr, must_skip=False): - super(_PendingSkip, self).__init__() - self.strRepr = str(expr + Empty()).replace('Empty', '...') - self.name = self.strRepr - self.anchor = expr - self.must_skip = must_skip - - def __add__(self, other): - skipper = SkipTo(other).setName("...")("_skipped*") - if self.must_skip: - def must_skip(t): - if not t._skipped or t._skipped.asList() == ['']: - del t[0] - t.pop("_skipped", None) - def show_skip(t): - if t._skipped.asList()[-1:] == ['']: - skipped = t.pop('_skipped') - t['_skipped'] = 'missing <' + repr(self.anchor) + '>' - return (self.anchor + skipper().addParseAction(must_skip) - | skipper().addParseAction(show_skip)) + other - - return self.anchor + skipper + other - - def __repr__(self): - return self.strRepr - - def parseImpl(self, *args): - raise Exception("use of `...` expression without following SkipTo target expression") - - -class Token(ParserElement): - """Abstract :class:`ParserElement` subclass, for defining atomic - matching patterns. - """ - def __init__(self): - super(Token, self).__init__(savelist=False) - - -class Empty(Token): - """An empty token, will always match. - """ - def __init__(self): - super(Empty, self).__init__() - self.name = "Empty" - self.mayReturnEmpty = True - self.mayIndexError = False - - -class NoMatch(Token): - """A token that will never match. 
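One common idiom for ``NoMatch`` (a sketch, assuming the usual ``|`` composition): seed an alternation that is then built up incrementally::

    from pyparsing import Keyword, NoMatch

    kw = NoMatch()
    for name in ("if", "else", "while"):
        kw = kw | Keyword(name)

    print(kw.parseString("while"))   # -> ['while']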
- """ - def __init__(self): - super(NoMatch, self).__init__() - self.name = "NoMatch" - self.mayReturnEmpty = True - self.mayIndexError = False - self.errmsg = "Unmatchable token" - - def parseImpl(self, instring, loc, doActions=True): - raise ParseException(instring, loc, self.errmsg, self) - - -class Literal(Token): - """Token to exactly match a specified string. - - Example:: - - Literal('blah').parseString('blah') # -> ['blah'] - Literal('blah').parseString('blahfooblah') # -> ['blah'] - Literal('blah').parseString('bla') # -> Exception: Expected "blah" - - For case-insensitive matching, use :class:`CaselessLiteral`. - - For keyword matching (force word break before and after the matched string), - use :class:`Keyword` or :class:`CaselessKeyword`. - """ - def __init__(self, matchString): - super(Literal, self).__init__() - self.match = matchString - self.matchLen = len(matchString) - try: - self.firstMatchChar = matchString[0] - except IndexError: - warnings.warn("null string passed to Literal; use Empty() instead", - SyntaxWarning, stacklevel=2) - self.__class__ = Empty - self.name = '"%s"' % _ustr(self.match) - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = False - self.mayIndexError = False - - # Performance tuning: modify __class__ to select - # a parseImpl optimized for single-character check - if self.matchLen == 1 and type(self) is Literal: - self.__class__ = _SingleCharLiteral - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] == self.firstMatchChar and instring.startswith(self.match, loc): - return loc + self.matchLen, self.match - raise ParseException(instring, loc, self.errmsg, self) - -class _SingleCharLiteral(Literal): - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] == self.firstMatchChar: - return loc + 1, self.match - raise ParseException(instring, loc, self.errmsg, self) - -_L = Literal -ParserElement._literalStringClass = Literal - -class Keyword(Token): - """Token to exactly match a specified string as a keyword, that is, - it must be immediately followed by a non-keyword character. Compare - with :class:`Literal`: - - - ``Literal("if")`` will match the leading ``'if'`` in - ``'ifAndOnlyIf'``. - - ``Keyword("if")`` will not; it will only match the leading - ``'if'`` in ``'if x=1'``, or ``'if(y==2)'`` - - Accepts two optional constructor arguments in addition to the - keyword string: - - - ``identChars`` is a string of characters that would be valid - identifier characters, defaulting to all alphanumerics + "_" and - "$" - - ``caseless`` allows case-insensitive matching, default is ``False``. - - Example:: - - Keyword("start").parseString("start") # -> ['start'] - Keyword("start").parseString("starting") # -> Exception - - For case-insensitive matching, use :class:`CaselessKeyword`. 
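The ``Literal``/``Keyword`` contrast described above, as a minimal runnable sketch::

    from pyparsing import Keyword, Literal, ParseException

    print(Literal("if").parseString("ifAndOnlyIf"))   # -> ['if']
    print(Keyword("if").parseString("if(y==2)"))      # -> ['if']
    try:
        Keyword("if").parseString("ifAndOnlyIf")
    except ParseException:
        print("Keyword requires a non-keyword character to follow")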
- """ - DEFAULT_KEYWORD_CHARS = alphanums + "_$" - - def __init__(self, matchString, identChars=None, caseless=False): - super(Keyword, self).__init__() - if identChars is None: - identChars = Keyword.DEFAULT_KEYWORD_CHARS - self.match = matchString - self.matchLen = len(matchString) - try: - self.firstMatchChar = matchString[0] - except IndexError: - warnings.warn("null string passed to Keyword; use Empty() instead", - SyntaxWarning, stacklevel=2) - self.name = '"%s"' % self.match - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = False - self.mayIndexError = False - self.caseless = caseless - if caseless: - self.caselessmatch = matchString.upper() - identChars = identChars.upper() - self.identChars = set(identChars) - - def parseImpl(self, instring, loc, doActions=True): - if self.caseless: - if ((instring[loc:loc + self.matchLen].upper() == self.caselessmatch) - and (loc >= len(instring) - self.matchLen - or instring[loc + self.matchLen].upper() not in self.identChars) - and (loc == 0 - or instring[loc - 1].upper() not in self.identChars)): - return loc + self.matchLen, self.match - - else: - if instring[loc] == self.firstMatchChar: - if ((self.matchLen == 1 or instring.startswith(self.match, loc)) - and (loc >= len(instring) - self.matchLen - or instring[loc + self.matchLen] not in self.identChars) - and (loc == 0 or instring[loc - 1] not in self.identChars)): - return loc + self.matchLen, self.match - - raise ParseException(instring, loc, self.errmsg, self) - - def copy(self): - c = super(Keyword, self).copy() - c.identChars = Keyword.DEFAULT_KEYWORD_CHARS - return c - - @staticmethod - def setDefaultKeywordChars(chars): - """Overrides the default Keyword chars - """ - Keyword.DEFAULT_KEYWORD_CHARS = chars - -class CaselessLiteral(Literal): - """Token to match a specified string, ignoring case of letters. - Note: the matched results will always be in the case of the given - match string, NOT the case of the input text. - - Example:: - - OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD'] - - (Contrast with example for :class:`CaselessKeyword`.) - """ - def __init__(self, matchString): - super(CaselessLiteral, self).__init__(matchString.upper()) - # Preserve the defining literal. - self.returnString = matchString - self.name = "'%s'" % self.returnString - self.errmsg = "Expected " + self.name - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc:loc + self.matchLen].upper() == self.match: - return loc + self.matchLen, self.returnString - raise ParseException(instring, loc, self.errmsg, self) - -class CaselessKeyword(Keyword): - """ - Caseless version of :class:`Keyword`. - - Example:: - - OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD'] - - (Contrast with example for :class:`CaselessLiteral`.) - """ - def __init__(self, matchString, identChars=None): - super(CaselessKeyword, self).__init__(matchString, identChars, caseless=True) - -class CloseMatch(Token): - """A variation on :class:`Literal` which matches "close" matches, - that is, strings with at most 'n' mismatching characters. 
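A minimal sketch of ``setDefaultKeywordChars``; note that the change is global and only affects keywords constructed after the call::

    from pyparsing import Keyword, ParseException

    print(Keyword("end").parseString("end-of-line"))   # '-' not a keyword char -> ['end']

    Keyword.setDefaultKeywordChars(Keyword.DEFAULT_KEYWORD_CHARS + "-")
    try:
        Keyword("end").parseString("end-of-line")      # now '-' extends the word
    except ParseException:
        print("no longer matches")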
- :class:`CloseMatch` takes parameters: - - - ``match_string`` - string to be matched - - ``maxMismatches`` - (``default=1``) maximum number of - mismatches allowed to count as a match - - The results from a successful parse will contain the matched text - from the input string and the following named results: - - - ``mismatches`` - a list of the positions within the - match_string where mismatches were found - - ``original`` - the original match_string used to compare - against the input string - - If ``mismatches`` is an empty list, then the match was an exact - match. - - Example:: - - patt = CloseMatch("ATCATCGAATGGA") - patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']}) - patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1) - - # exact match - patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']}) - - # close match allowing up to 2 mismatches - patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2) - patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']}) - """ - def __init__(self, match_string, maxMismatches=1): - super(CloseMatch, self).__init__() - self.name = match_string - self.match_string = match_string - self.maxMismatches = maxMismatches - self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches) - self.mayIndexError = False - self.mayReturnEmpty = False - - def parseImpl(self, instring, loc, doActions=True): - start = loc - instrlen = len(instring) - maxloc = start + len(self.match_string) - - if maxloc <= instrlen: - match_string = self.match_string - match_stringloc = 0 - mismatches = [] - maxMismatches = self.maxMismatches - - for match_stringloc, s_m in enumerate(zip(instring[loc:maxloc], match_string)): - src, mat = s_m - if src != mat: - mismatches.append(match_stringloc) - if len(mismatches) > maxMismatches: - break - else: - loc = match_stringloc + 1 - results = ParseResults([instring[start:loc]]) - results['original'] = match_string - results['mismatches'] = mismatches - return loc, results - - raise ParseException(instring, loc, self.errmsg, self) - - -class Word(Token): - """Token for matching words composed of allowed character sets. - Defined with string containing all allowed initial characters, an - optional string containing allowed body characters (if omitted, - defaults to the initial character set), and an optional minimum, - maximum, and/or exact length. The default value for ``min`` is - 1 (a minimum value < 1 is not valid); the default values for - ``max`` and ``exact`` are 0, meaning no maximum or exact - length restriction. An optional ``excludeChars`` parameter can - list characters that might be found in the input ``bodyChars`` - string; useful to define a word of all printables except for one or - two characters, for instance. - - :class:`srange` is useful for defining custom character set strings - for defining ``Word`` expressions, using range notation from - regular expression character sets. - - A common mistake is to use :class:`Word` to match a specific literal - string, as in ``Word("Address")``. Remember that :class:`Word` - uses the string argument to define *sets* of matchable characters. - This expression would match "Add", "AAA", "dAred", or any other word - made up of the characters 'A', 'd', 'r', 'e', and 's'. 
To match an - exact literal string, use :class:`Literal` or :class:`Keyword`. - - pyparsing includes helper strings for building Words: - - - :class:`alphas` - - :class:`nums` - - :class:`alphanums` - - :class:`hexnums` - - :class:`alphas8bit` (alphabetic characters in ASCII range 128-255 - - accented, tilded, umlauted, etc.) - - :class:`punc8bit` (non-alphabetic characters in ASCII range - 128-255 - currency, symbols, superscripts, diacriticals, etc.) - - :class:`printables` (any non-whitespace character) - - Example:: - - # a word composed of digits - integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9")) - - # a word with a leading capital, and zero or more lowercase - capital_word = Word(alphas.upper(), alphas.lower()) - - # hostnames are alphanumeric, with leading alpha, and '-' - hostname = Word(alphas, alphanums + '-') - - # roman numeral (not a strict parser, accepts invalid mix of characters) - roman = Word("IVXLCDM") - - # any string of non-whitespace characters, except for ',' - csv_value = Word(printables, excludeChars=",") - """ - def __init__(self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None): - super(Word, self).__init__() - if excludeChars: - excludeChars = set(excludeChars) - initChars = ''.join(c for c in initChars if c not in excludeChars) - if bodyChars: - bodyChars = ''.join(c for c in bodyChars if c not in excludeChars) - self.initCharsOrig = initChars - self.initChars = set(initChars) - if bodyChars: - self.bodyCharsOrig = bodyChars - self.bodyChars = set(bodyChars) - else: - self.bodyCharsOrig = initChars - self.bodyChars = set(initChars) - - self.maxSpecified = max > 0 - - if min < 1: - raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted") - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - self.mayIndexError = False - self.asKeyword = asKeyword - - if ' ' not in self.initCharsOrig + self.bodyCharsOrig and (min == 1 and max == 0 and exact == 0): - if self.bodyCharsOrig == self.initCharsOrig: - self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig) - elif len(self.initCharsOrig) == 1: - self.reString = "%s[%s]*" % (re.escape(self.initCharsOrig), - _escapeRegexRangeChars(self.bodyCharsOrig),) - else: - self.reString = "[%s][%s]*" % (_escapeRegexRangeChars(self.initCharsOrig), - _escapeRegexRangeChars(self.bodyCharsOrig),) - if self.asKeyword: - self.reString = r"\b" + self.reString + r"\b" - - try: - self.re = re.compile(self.reString) - except Exception: - self.re = None - else: - self.re_match = self.re.match - self.__class__ = _WordRegex - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] not in self.initChars: - raise ParseException(instring, loc, self.errmsg, self) - - start = loc - loc += 1 - instrlen = len(instring) - bodychars = self.bodyChars - maxloc = start + self.maxLen - maxloc = min(maxloc, instrlen) - while loc < maxloc and instring[loc] in bodychars: - loc += 1 - - throwException = False - if loc - start < self.minLen: - throwException = True - elif self.maxSpecified and loc < instrlen and instring[loc] in bodychars: - throwException = True - elif self.asKeyword: - if (start > 0 and instring[start - 1] in bodychars - or loc < instrlen and instring[loc] in bodychars): - throwException = True - - if throwException: - raise 
ParseException(instring, loc, self.errmsg, self) - - return loc, instring[start:loc] - - def __str__(self): - try: - return super(Word, self).__str__() - except Exception: - pass - - if self.strRepr is None: - - def charsAsStr(s): - if len(s) > 4: - return s[:4] + "..." - else: - return s - - if self.initCharsOrig != self.bodyCharsOrig: - self.strRepr = "W:(%s, %s)" % (charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig)) - else: - self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig) - - return self.strRepr - -class _WordRegex(Word): - def parseImpl(self, instring, loc, doActions=True): - result = self.re_match(instring, loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - return loc, result.group() - - -class Char(_WordRegex): - """A short-cut class for defining ``Word(characters, exact=1)``, - when defining a match of any single character in a string of - characters. - """ - def __init__(self, charset, asKeyword=False, excludeChars=None): - super(Char, self).__init__(charset, exact=1, asKeyword=asKeyword, excludeChars=excludeChars) - self.reString = "[%s]" % _escapeRegexRangeChars(''.join(self.initChars)) - if asKeyword: - self.reString = r"\b%s\b" % self.reString - self.re = re.compile(self.reString) - self.re_match = self.re.match - - -class Regex(Token): - r"""Token for matching strings that match a given regular - expression. Defined with string specifying the regular expression in - a form recognized by the stdlib Python `re module <https://docs.python.org/3/library/re.html>`_. - If the given regex contains named groups (defined using ``(?P<name>...)``), - these will be preserved as named parse results. - - If instead of the Python stdlib re module you wish to use a different RE module - (such as the ``regex`` module), you can do so by building your - Regex object with a compiled RE that was compiled using ``regex``: - - Example:: - - realnum = Regex(r"[+-]?\d+\.\d*") - date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)') - # ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression - roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})") - - # use regex module instead of stdlib re module to construct a Regex using - # a compiled regular expression - import regex - parser = pp.Regex(regex.compile(r'[0-9]')) - - """ - def __init__(self, pattern, flags=0, asGroupList=False, asMatch=False): - """The parameters ``pattern`` and ``flags`` are passed - to the ``re.compile()`` function as-is. See the Python - `re module <https://docs.python.org/3/library/re.html>`_ for an - explanation of the acceptable patterns and flags.
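A minimal sketch of named groups becoming named results, using the ``date`` pattern from the docstring above::

    from pyparsing import Regex

    date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
    result = date.parseString("2021-07-14")
    print(result['year'], result['month'], result['day'])   # 2021 07 14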
- """ - super(Regex, self).__init__() - - if isinstance(pattern, basestring): - if not pattern: - warnings.warn("null string passed to Regex; use Empty() instead", - SyntaxWarning, stacklevel=2) - - self.pattern = pattern - self.flags = flags - - try: - self.re = re.compile(self.pattern, self.flags) - self.reString = self.pattern - except sre_constants.error: - warnings.warn("invalid pattern (%s) passed to Regex" % pattern, - SyntaxWarning, stacklevel=2) - raise - - elif hasattr(pattern, 'pattern') and hasattr(pattern, 'match'): - self.re = pattern - self.pattern = self.reString = pattern.pattern - self.flags = flags - - else: - raise TypeError("Regex may only be constructed with a string or a compiled RE object") - - self.re_match = self.re.match - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - self.mayIndexError = False - self.mayReturnEmpty = self.re_match("") is not None - self.asGroupList = asGroupList - self.asMatch = asMatch - if self.asGroupList: - self.parseImpl = self.parseImplAsGroupList - if self.asMatch: - self.parseImpl = self.parseImplAsMatch - - def parseImpl(self, instring, loc, doActions=True): - result = self.re_match(instring, loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = ParseResults(result.group()) - d = result.groupdict() - if d: - for k, v in d.items(): - ret[k] = v - return loc, ret - - def parseImplAsGroupList(self, instring, loc, doActions=True): - result = self.re_match(instring, loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = result.groups() - return loc, ret - - def parseImplAsMatch(self, instring, loc, doActions=True): - result = self.re_match(instring, loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = result - return loc, ret - - def __str__(self): - try: - return super(Regex, self).__str__() - except Exception: - pass - - if self.strRepr is None: - self.strRepr = "Re:(%s)" % repr(self.pattern) - - return self.strRepr - - def sub(self, repl): - r""" - Return Regex with an attached parse action to transform the parsed - result as if called using `re.sub(expr, repl, string) <https://docs.python.org/3/library/re.html#re.sub>`_. - - Example:: - - make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2</\1>") - print(make_html.transformString("h1:main title:")) - # prints "<h1>main title</h1>" - """ - if self.asGroupList: - warnings.warn("cannot use sub() with Regex(asGroupList=True)", - SyntaxWarning, stacklevel=2) - raise SyntaxError() - - if self.asMatch and callable(repl): - warnings.warn("cannot use sub() with a callable with Regex(asMatch=True)", - SyntaxWarning, stacklevel=2) - raise SyntaxError() - - if self.asMatch: - def pa(tokens): - return tokens[0].expand(repl) - else: - def pa(tokens): - return self.re.sub(repl, tokens[0]) - return self.addParseAction(pa) - -class QuotedString(Token): - r""" - Token for matching strings that are delimited by quoting characters. 
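The ``asGroupList`` and ``asMatch`` constructor flags are not demonstrated in the docstrings; a minimal sketch of both::

    from pyparsing import Regex

    pair = Regex(r"(\d+)-(\d+)", asGroupList=True)
    print(pair.parseString("12-34"))     # single token holding the group tuple: [('12', '34')]

    num = Regex(r"\d+", asMatch=True)
    m = num.parseString("42")[0]         # token is the underlying re match object
    print(m.group(), m.span())           # 42 (0, 2)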
- - Defined with the following parameters: - - - quoteChar - string of one or more characters defining the - quote delimiting string - - escChar - character to escape quotes, typically backslash - (default= ``None``) - - escQuote - special quote sequence to escape an embedded quote - string (such as SQL's ``""`` to escape an embedded ``"``) - (default= ``None``) - - multiline - boolean indicating whether quotes can span - multiple lines (default= ``False``) - - unquoteResults - boolean indicating whether the matched text - should be unquoted (default= ``True``) - - endQuoteChar - string of one or more characters defining the - end of the quote delimited string (default= ``None`` => same as - quoteChar) - - convertWhitespaceEscapes - convert escaped whitespace - (``'\t'``, ``'\n'``, etc.) to actual whitespace - (default= ``True``) - - Example:: - - qs = QuotedString('"') - print(qs.searchString('lsjdf "This is the quote" sldjf')) - complex_qs = QuotedString('{{', endQuoteChar='}}') - print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf')) - sql_qs = QuotedString('"', escQuote='""') - print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf')) - - prints:: - - [['This is the quote']] - [['This is the "quote"']] - [['This is the quote with "embedded" quotes']] - """ - def __init__(self, quoteChar, escChar=None, escQuote=None, multiline=False, - unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True): - super(QuotedString, self).__init__() - - # remove white space from quote chars - wont work anyway - quoteChar = quoteChar.strip() - if not quoteChar: - warnings.warn("quoteChar cannot be the empty string", SyntaxWarning, stacklevel=2) - raise SyntaxError() - - if endQuoteChar is None: - endQuoteChar = quoteChar - else: - endQuoteChar = endQuoteChar.strip() - if not endQuoteChar: - warnings.warn("endQuoteChar cannot be the empty string", SyntaxWarning, stacklevel=2) - raise SyntaxError() - - self.quoteChar = quoteChar - self.quoteCharLen = len(quoteChar) - self.firstQuoteChar = quoteChar[0] - self.endQuoteChar = endQuoteChar - self.endQuoteCharLen = len(endQuoteChar) - self.escChar = escChar - self.escQuote = escQuote - self.unquoteResults = unquoteResults - self.convertWhitespaceEscapes = convertWhitespaceEscapes - - if multiline: - self.flags = re.MULTILINE | re.DOTALL - self.pattern = r'%s(?:[^%s%s]' % (re.escape(self.quoteChar), - _escapeRegexRangeChars(self.endQuoteChar[0]), - (escChar is not None and _escapeRegexRangeChars(escChar) or '')) - else: - self.flags = 0 - self.pattern = r'%s(?:[^%s\n\r%s]' % (re.escape(self.quoteChar), - _escapeRegexRangeChars(self.endQuoteChar[0]), - (escChar is not None and _escapeRegexRangeChars(escChar) or '')) - if len(self.endQuoteChar) > 1: - self.pattern += ( - '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]), - _escapeRegexRangeChars(self.endQuoteChar[i])) - for i in range(len(self.endQuoteChar) - 1, 0, -1)) + ')') - - if escQuote: - self.pattern += (r'|(?:%s)' % re.escape(escQuote)) - if escChar: - self.pattern += (r'|(?:%s.)' % re.escape(escChar)) - self.escCharReplacePattern = re.escape(self.escChar) + "(.)" - self.pattern += (r')*%s' % re.escape(self.endQuoteChar)) - - try: - self.re = re.compile(self.pattern, self.flags) - self.reString = self.pattern - self.re_match = self.re.match - except sre_constants.error: - warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern, - SyntaxWarning, stacklevel=2) - raise - - self.name = _ustr(self) - self.errmsg = "Expected " 
+ self.name - self.mayIndexError = False - self.mayReturnEmpty = True - - def parseImpl(self, instring, loc, doActions=True): - result = instring[loc] == self.firstQuoteChar and self.re_match(instring, loc) or None - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = result.group() - - if self.unquoteResults: - - # strip off quotes - ret = ret[self.quoteCharLen: -self.endQuoteCharLen] - - if isinstance(ret, basestring): - # replace escaped whitespace - if '\\' in ret and self.convertWhitespaceEscapes: - ws_map = { - r'\t': '\t', - r'\n': '\n', - r'\f': '\f', - r'\r': '\r', - } - for wslit, wschar in ws_map.items(): - ret = ret.replace(wslit, wschar) - - # replace escaped characters - if self.escChar: - ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret) - - # replace escaped quotes - if self.escQuote: - ret = ret.replace(self.escQuote, self.endQuoteChar) - - return loc, ret - - def __str__(self): - try: - return super(QuotedString, self).__str__() - except Exception: - pass - - if self.strRepr is None: - self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar) - - return self.strRepr - - -class CharsNotIn(Token): - """Token for matching words composed of characters *not* in a given - set (will include whitespace in matched characters if not listed in - the provided exclusion set - see example). Defined with string - containing all disallowed characters, and an optional minimum, - maximum, and/or exact length. The default value for ``min`` is - 1 (a minimum value < 1 is not valid); the default values for - ``max`` and ``exact`` are 0, meaning no maximum or exact - length restriction. - - Example:: - - # define a comma-separated-value as anything that is not a ',' - csv_value = CharsNotIn(',') - print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213")) - - prints:: - - ['dkls', 'lsdkjf', 's12 34', '@!#', '213'] - """ - def __init__(self, notChars, min=1, max=0, exact=0): - super(CharsNotIn, self).__init__() - self.skipWhitespace = False - self.notChars = notChars - - if min < 1: - raise ValueError("cannot specify a minimum length < 1; use " - "Optional(CharsNotIn()) if zero-length char group is permitted") - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = (self.minLen == 0) - self.mayIndexError = False - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] in self.notChars: - raise ParseException(instring, loc, self.errmsg, self) - - start = loc - loc += 1 - notchars = self.notChars - maxlen = min(start + self.maxLen, len(instring)) - while loc < maxlen and instring[loc] not in notchars: - loc += 1 - - if loc - start < self.minLen: - raise ParseException(instring, loc, self.errmsg, self) - - return loc, instring[start:loc] - - def __str__(self): - try: - return super(CharsNotIn, self).__str__() - except Exception: - pass - - if self.strRepr is None: - if len(self.notChars) > 4: - self.strRepr = "!W:(%s...)" % self.notChars[:4] - else: - self.strRepr = "!W:(%s)" % self.notChars - - return self.strRepr - -class White(Token): - """Special matching class for matching whitespace. Normally, - whitespace is ignored by pyparsing grammars. This class is included - when some whitespace structures are significant. 
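``White`` has no example of its own; a minimal sketch that makes a single newline significant between two words::

    from pyparsing import White, Word, alphas

    expr = Word(alphas) + White("\n").suppress() + Word(alphas)
    print(expr.parseString("hello\nworld"))   # -> ['hello', 'world']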
Define with - a string containing the whitespace characters to be matched; default - is ``" \\t\\r\\n"``. Also takes optional ``min``, - ``max``, and ``exact`` arguments, as defined for the - :class:`Word` class. - """ - whiteStrs = { - ' ' : '<SP>', - '\t': '<TAB>', - '\n': '<LF>', - '\r': '<CR>', - '\f': '<FF>', - u'\u00A0': '<NBSP>', - u'\u1680': '<OGHAM_SPACE_MARK>', - u'\u180E': '<MONGOLIAN_VOWEL_SEPARATOR>', - u'\u2000': '<EN_QUAD>', - u'\u2001': '<EM_QUAD>', - u'\u2002': '<EN_SPACE>', - u'\u2003': '<EM_SPACE>', - u'\u2004': '<THREE-PER-EM_SPACE>', - u'\u2005': '<FOUR-PER-EM_SPACE>', - u'\u2006': '<SIX-PER-EM_SPACE>', - u'\u2007': '<FIGURE_SPACE>', - u'\u2008': '<PUNCTUATION_SPACE>', - u'\u2009': '<THIN_SPACE>', - u'\u200A': '<HAIR_SPACE>', - u'\u200B': '<ZERO_WIDTH_SPACE>', - u'\u202F': '<NNBSP>', - u'\u205F': '<MMSP>', - u'\u3000': '<IDEOGRAPHIC_SPACE>', - } - def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0): - super(White, self).__init__() - self.matchWhite = ws - self.setWhitespaceChars("".join(c for c in self.whiteChars if c not in self.matchWhite)) - # ~ self.leaveWhitespace() - self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite)) - self.mayReturnEmpty = True - self.errmsg = "Expected " + self.name - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - def parseImpl(self, instring, loc, doActions=True): - if instring[loc] not in self.matchWhite: - raise ParseException(instring, loc, self.errmsg, self) - start = loc - loc += 1 - maxloc = start + self.maxLen - maxloc = min(maxloc, len(instring)) - while loc < maxloc and instring[loc] in self.matchWhite: - loc += 1 - - if loc - start < self.minLen: - raise ParseException(instring, loc, self.errmsg, self) - - return loc, instring[start:loc] - - -class _PositionToken(Token): - def __init__(self): - super(_PositionToken, self).__init__() - self.name = self.__class__.__name__ - self.mayReturnEmpty = True - self.mayIndexError = False - -class GoToColumn(_PositionToken): - """Token to advance to a specific column of input text; useful for - tabular report scraping. 
- """ - def __init__(self, colno): - super(GoToColumn, self).__init__() - self.col = colno - - def preParse(self, instring, loc): - if col(loc, instring) != self.col: - instrlen = len(instring) - if self.ignoreExprs: - loc = self._skipIgnorables(instring, loc) - while loc < instrlen and instring[loc].isspace() and col(loc, instring) != self.col: - loc += 1 - return loc - - def parseImpl(self, instring, loc, doActions=True): - thiscol = col(loc, instring) - if thiscol > self.col: - raise ParseException(instring, loc, "Text not in expected column", self) - newloc = loc + self.col - thiscol - ret = instring[loc: newloc] - return newloc, ret - - -class LineStart(_PositionToken): - r"""Matches if current position is at the beginning of a line within - the parse string - - Example:: - - test = '''\ - AAA this line - AAA and this line - AAA but not this one - B AAA and definitely not this one - ''' - - for t in (LineStart() + 'AAA' + restOfLine).searchString(test): - print(t) - - prints:: - - ['AAA', ' this line'] - ['AAA', ' and this line'] - - """ - def __init__(self): - super(LineStart, self).__init__() - self.errmsg = "Expected start of line" - - def parseImpl(self, instring, loc, doActions=True): - if col(loc, instring) == 1: - return loc, [] - raise ParseException(instring, loc, self.errmsg, self) - -class LineEnd(_PositionToken): - """Matches if current position is at the end of a line within the - parse string - """ - def __init__(self): - super(LineEnd, self).__init__() - self.setWhitespaceChars(ParserElement.DEFAULT_WHITE_CHARS.replace("\n", "")) - self.errmsg = "Expected end of line" - - def parseImpl(self, instring, loc, doActions=True): - if loc < len(instring): - if instring[loc] == "\n": - return loc + 1, "\n" - else: - raise ParseException(instring, loc, self.errmsg, self) - elif loc == len(instring): - return loc + 1, [] - else: - raise ParseException(instring, loc, self.errmsg, self) - -class StringStart(_PositionToken): - """Matches if current position is at the beginning of the parse - string - """ - def __init__(self): - super(StringStart, self).__init__() - self.errmsg = "Expected start of text" - - def parseImpl(self, instring, loc, doActions=True): - if loc != 0: - # see if entire string up to here is just whitespace and ignoreables - if loc != self.preParse(instring, 0): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - -class StringEnd(_PositionToken): - """Matches if current position is at the end of the parse string - """ - def __init__(self): - super(StringEnd, self).__init__() - self.errmsg = "Expected end of text" - - def parseImpl(self, instring, loc, doActions=True): - if loc < len(instring): - raise ParseException(instring, loc, self.errmsg, self) - elif loc == len(instring): - return loc + 1, [] - elif loc > len(instring): - return loc, [] - else: - raise ParseException(instring, loc, self.errmsg, self) - -class WordStart(_PositionToken): - """Matches if the current position is at the beginning of a Word, - and is not preceded by any character in a given set of - ``wordChars`` (default= ``printables``). To emulate the - ``\b`` behavior of regular expressions, use - ``WordStart(alphanums)``. ``WordStart`` will also match at - the beginning of the string being parsed, or at the beginning of - a line. 
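A minimal sketch of ``WordStart``/``WordEnd`` emulating the regex ``\b`` behavior described above::

    from pyparsing import Literal, WordStart, WordEnd, alphanums

    # emulate the regex r'\bend\b' around a plain Literal
    end_word = WordStart(alphanums) + Literal("end") + WordEnd(alphanums)
    print(end_word.searchString("end bend endow the end"))   # -> [['end'], ['end']]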
- """ - def __init__(self, wordChars=printables): - super(WordStart, self).__init__() - self.wordChars = set(wordChars) - self.errmsg = "Not at the start of a word" - - def parseImpl(self, instring, loc, doActions=True): - if loc != 0: - if (instring[loc - 1] in self.wordChars - or instring[loc] not in self.wordChars): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - -class WordEnd(_PositionToken): - """Matches if the current position is at the end of a Word, and is - not followed by any character in a given set of ``wordChars`` - (default= ``printables``). To emulate the ``\b`` behavior of - regular expressions, use ``WordEnd(alphanums)``. ``WordEnd`` - will also match at the end of the string being parsed, or at the end - of a line. - """ - def __init__(self, wordChars=printables): - super(WordEnd, self).__init__() - self.wordChars = set(wordChars) - self.skipWhitespace = False - self.errmsg = "Not at the end of a word" - - def parseImpl(self, instring, loc, doActions=True): - instrlen = len(instring) - if instrlen > 0 and loc < instrlen: - if (instring[loc] in self.wordChars or - instring[loc - 1] not in self.wordChars): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - - -class ParseExpression(ParserElement): - """Abstract subclass of ParserElement, for combining and - post-processing parsed tokens. - """ - def __init__(self, exprs, savelist=False): - super(ParseExpression, self).__init__(savelist) - if isinstance(exprs, _generatorType): - exprs = list(exprs) - - if isinstance(exprs, basestring): - self.exprs = [self._literalStringClass(exprs)] - elif isinstance(exprs, ParserElement): - self.exprs = [exprs] - elif isinstance(exprs, Iterable): - exprs = list(exprs) - # if sequence of strings provided, wrap with Literal - if any(isinstance(expr, basestring) for expr in exprs): - exprs = (self._literalStringClass(e) if isinstance(e, basestring) else e for e in exprs) - self.exprs = list(exprs) - else: - try: - self.exprs = list(exprs) - except TypeError: - self.exprs = [exprs] - self.callPreparse = False - - def append(self, other): - self.exprs.append(other) - self.strRepr = None - return self - - def leaveWhitespace(self): - """Extends ``leaveWhitespace`` defined in base class, and also invokes ``leaveWhitespace`` on - all contained expressions.""" - self.skipWhitespace = False - self.exprs = [e.copy() for e in self.exprs] - for e in self.exprs: - e.leaveWhitespace() - return self - - def ignore(self, other): - if isinstance(other, Suppress): - if other not in self.ignoreExprs: - super(ParseExpression, self).ignore(other) - for e in self.exprs: - e.ignore(self.ignoreExprs[-1]) - else: - super(ParseExpression, self).ignore(other) - for e in self.exprs: - e.ignore(self.ignoreExprs[-1]) - return self - - def __str__(self): - try: - return super(ParseExpression, self).__str__() - except Exception: - pass - - if self.strRepr is None: - self.strRepr = "%s:(%s)" % (self.__class__.__name__, _ustr(self.exprs)) - return self.strRepr - - def streamline(self): - super(ParseExpression, self).streamline() - - for e in self.exprs: - e.streamline() - - # collapse nested And's of the form And(And(And(a, b), c), d) to And(a, b, c, d) - # but only if there are no parse actions or resultsNames on the nested And's - # (likewise for Or's and MatchFirst's) - if len(self.exprs) == 2: - other = self.exprs[0] - if (isinstance(other, self.__class__) - and not other.parseAction - and other.resultsName is None - and not other.debug): - self.exprs = other.exprs[:] 
+ [self.exprs[1]] - self.strRepr = None - self.mayReturnEmpty |= other.mayReturnEmpty - self.mayIndexError |= other.mayIndexError - - other = self.exprs[-1] - if (isinstance(other, self.__class__) - and not other.parseAction - and other.resultsName is None - and not other.debug): - self.exprs = self.exprs[:-1] + other.exprs[:] - self.strRepr = None - self.mayReturnEmpty |= other.mayReturnEmpty - self.mayIndexError |= other.mayIndexError - - self.errmsg = "Expected " + _ustr(self) - - return self - - def validate(self, validateTrace=None): - tmp = (validateTrace if validateTrace is not None else [])[:] + [self] - for e in self.exprs: - e.validate(tmp) - self.checkRecursion([]) - - def copy(self): - ret = super(ParseExpression, self).copy() - ret.exprs = [e.copy() for e in self.exprs] - return ret - - def _setResultsName(self, name, listAllMatches=False): - if __diag__.warn_ungrouped_named_tokens_in_collection: - for e in self.exprs: - if isinstance(e, ParserElement) and e.resultsName: - warnings.warn("{0}: setting results name {1!r} on {2} expression " - "collides with {3!r} on contained expression".format("warn_ungrouped_named_tokens_in_collection", - name, - type(self).__name__, - e.resultsName), - stacklevel=3) - - return super(ParseExpression, self)._setResultsName(name, listAllMatches) - - -class And(ParseExpression): - """ - Requires all given :class:`ParseExpression` s to be found in the given order. - Expressions may be separated by whitespace. - May be constructed using the ``'+'`` operator. - May also be constructed using the ``'-'`` operator, which will - suppress backtracking. - - Example:: - - integer = Word(nums) - name_expr = OneOrMore(Word(alphas)) - - expr = And([integer("id"), name_expr("name"), integer("age")]) - # more easily written as: - expr = integer("id") + name_expr("name") + integer("age") - """ - - class _ErrorStop(Empty): - def __init__(self, *args, **kwargs): - super(And._ErrorStop, self).__init__(*args, **kwargs) - self.name = '-' - self.leaveWhitespace() - - def __init__(self, exprs, savelist=True): - exprs = list(exprs) - if exprs and Ellipsis in exprs: - tmp = [] - for i, expr in enumerate(exprs): - if expr is Ellipsis: - if i < len(exprs) - 1: - skipto_arg = (Empty() + exprs[i + 1]).exprs[-1] - tmp.append(SkipTo(skipto_arg)("_skipped*")) - else: - raise Exception("cannot construct And with sequence ending in ...") - else: - tmp.append(expr) - exprs[:] = tmp - super(And, self).__init__(exprs, savelist) - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - self.setWhitespaceChars(self.exprs[0].whiteChars) - self.skipWhitespace = self.exprs[0].skipWhitespace - self.callPreparse = True - - def streamline(self): - # collapse any _PendingSkip's - if self.exprs: - if any(isinstance(e, ParseExpression) and e.exprs and isinstance(e.exprs[-1], _PendingSkip) - for e in self.exprs[:-1]): - for i, e in enumerate(self.exprs[:-1]): - if e is None: - continue - if (isinstance(e, ParseExpression) - and e.exprs and isinstance(e.exprs[-1], _PendingSkip)): - e.exprs[-1] = e.exprs[-1] + self.exprs[i + 1] - self.exprs[i + 1] = None - self.exprs = [e for e in self.exprs if e is not None] - - super(And, self).streamline() - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - return self - - def parseImpl(self, instring, loc, doActions=True): - # pass False as last arg to _parse for first element, since we already - # pre-parsed the string as part of our And pre-parsing - loc, resultlist = self.exprs[0]._parse(instring, loc, doActions, 
callPreParse=False) - errorStop = False - for e in self.exprs[1:]: - if isinstance(e, And._ErrorStop): - errorStop = True - continue - if errorStop: - try: - loc, exprtokens = e._parse(instring, loc, doActions) - except ParseSyntaxException: - raise - except ParseBaseException as pe: - pe.__traceback__ = None - raise ParseSyntaxException._from_exception(pe) - except IndexError: - raise ParseSyntaxException(instring, len(instring), self.errmsg, self) - else: - loc, exprtokens = e._parse(instring, loc, doActions) - if exprtokens or exprtokens.haskeys(): - resultlist += exprtokens - return loc, resultlist - - def __iadd__(self, other): - if isinstance(other, basestring): - other = self._literalStringClass(other) - return self.append(other) # And([self, other]) - - def checkRecursion(self, parseElementList): - subRecCheckList = parseElementList[:] + [self] - for e in self.exprs: - e.checkRecursion(subRecCheckList) - if not e.mayReturnEmpty: - break - - def __str__(self): - if hasattr(self, "name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}" - - return self.strRepr - - -class Or(ParseExpression): - """Requires that at least one :class:`ParseExpression` is found. If - two expressions match, the expression that matches the longest - string will be used. May be constructed using the ``'^'`` - operator. - - Example:: - - # construct Or using '^' operator - - number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums)) - print(number.searchString("123 3.1416 789")) - - prints:: - - [['123'], ['3.1416'], ['789']] - """ - def __init__(self, exprs, savelist=False): - super(Or, self).__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - else: - self.mayReturnEmpty = True - - def streamline(self): - super(Or, self).streamline() - if __compat__.collect_all_And_tokens: - self.saveAsList = any(e.saveAsList for e in self.exprs) - return self - - def parseImpl(self, instring, loc, doActions=True): - maxExcLoc = -1 - maxException = None - matches = [] - for e in self.exprs: - try: - loc2 = e.tryParse(instring, loc) - except ParseException as err: - err.__traceback__ = None - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - except IndexError: - if len(instring) > maxExcLoc: - maxException = ParseException(instring, len(instring), e.errmsg, self) - maxExcLoc = len(instring) - else: - # save match among all matches, to retry longest to shortest - matches.append((loc2, e)) - - if matches: - # re-evaluate all matches in descending order of length of match, in case attached actions - # might change whether or how much they match of the input. 
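# Minimal usage sketch (assumes the standalone `pyparsing` package is importable
# as `pp`; names below are illustrative only). It shows what the longest-match
# re-sort above buys you: Or ('^') picks the longest alternative regardless of
# the order in which the alternatives were listed.
import pyparsing as pp

number = pp.Word(pp.nums) ^ pp.Combine(pp.Word(pp.nums) + '.' + pp.Word(pp.nums))
print(number.parseString("3.1416"))  # -> ['3.1416'], even though Word(nums) was listed first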
- matches.sort(key=itemgetter(0), reverse=True) - - if not doActions: - # no further conditions or parse actions to change the selection of - # alternative, so the first match will be the best match - best_expr = matches[0][1] - return best_expr._parse(instring, loc, doActions) - - longest = -1, None - for loc1, expr1 in matches: - if loc1 <= longest[0]: - # already have a longer match than this one will deliver, we are done - return longest - - try: - loc2, toks = expr1._parse(instring, loc, doActions) - except ParseException as err: - err.__traceback__ = None - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - else: - if loc2 >= loc1: - return loc2, toks - # didn't match as much as before - elif loc2 > longest[0]: - longest = loc2, toks - - if longest != (-1, None): - return longest - - if maxException is not None: - maxException.msg = self.errmsg - raise maxException - else: - raise ParseException(instring, loc, "no defined alternatives to match", self) - - - def __ixor__(self, other): - if isinstance(other, basestring): - other = self._literalStringClass(other) - return self.append(other) # Or([self, other]) - - def __str__(self): - if hasattr(self, "name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}" - - return self.strRepr - - def checkRecursion(self, parseElementList): - subRecCheckList = parseElementList[:] + [self] - for e in self.exprs: - e.checkRecursion(subRecCheckList) - - def _setResultsName(self, name, listAllMatches=False): - if (not __compat__.collect_all_And_tokens - and __diag__.warn_multiple_tokens_in_named_alternation): - if any(isinstance(e, And) for e in self.exprs): - warnings.warn("{0}: setting results name {1!r} on {2} expression " - "may only return a single token for an And alternative, " - "in future will return the full list of tokens".format( - "warn_multiple_tokens_in_named_alternation", name, type(self).__name__), - stacklevel=3) - - return super(Or, self)._setResultsName(name, listAllMatches) - - -class MatchFirst(ParseExpression): - """Requires that at least one :class:`ParseExpression` is found. If - two expressions match, the first one listed is the one that will - match. May be constructed using the ``'|'`` operator. - - Example:: - - # construct MatchFirst using '|' operator - - # watch the order of expressions to match - number = Word(nums) | Combine(Word(nums) + '.' + Word(nums)) - print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']] - - # put more selective expression first - number = Combine(Word(nums) + '.' 
+ Word(nums)) | Word(nums) - print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']] - """ - def __init__(self, exprs, savelist=False): - super(MatchFirst, self).__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - else: - self.mayReturnEmpty = True - - def streamline(self): - super(MatchFirst, self).streamline() - if __compat__.collect_all_And_tokens: - self.saveAsList = any(e.saveAsList for e in self.exprs) - return self - - def parseImpl(self, instring, loc, doActions=True): - maxExcLoc = -1 - maxException = None - for e in self.exprs: - try: - ret = e._parse(instring, loc, doActions) - return ret - except ParseException as err: - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - except IndexError: - if len(instring) > maxExcLoc: - maxException = ParseException(instring, len(instring), e.errmsg, self) - maxExcLoc = len(instring) - - # only got here if no expression matched, raise exception for match that made it the furthest - else: - if maxException is not None: - maxException.msg = self.errmsg - raise maxException - else: - raise ParseException(instring, loc, "no defined alternatives to match", self) - - def __ior__(self, other): - if isinstance(other, basestring): - other = self._literalStringClass(other) - return self.append(other) # MatchFirst([self, other]) - - def __str__(self): - if hasattr(self, "name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}" - - return self.strRepr - - def checkRecursion(self, parseElementList): - subRecCheckList = parseElementList[:] + [self] - for e in self.exprs: - e.checkRecursion(subRecCheckList) - - def _setResultsName(self, name, listAllMatches=False): - if (not __compat__.collect_all_And_tokens - and __diag__.warn_multiple_tokens_in_named_alternation): - if any(isinstance(e, And) for e in self.exprs): - warnings.warn("{0}: setting results name {1!r} on {2} expression " - "may only return a single token for an And alternative, " - "in future will return the full list of tokens".format( - "warn_multiple_tokens_in_named_alternation", name, type(self).__name__), - stacklevel=3) - - return super(MatchFirst, self)._setResultsName(name, listAllMatches) - - -class Each(ParseExpression): - """Requires all given :class:`ParseExpression` s to be found, but in - any order. Expressions may be separated by whitespace. - - May be constructed using the ``'&'`` operator. 
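# Hedged sketch (standalone `pyparsing` assumed importable as `pp`): unlike Or,
# MatchFirst ('|') commits to the first alternative that matches, so the more
# selective expression should come first, as the docstring above advises.
import pyparsing as pp

loose = pp.Word(pp.nums) | pp.Combine(pp.Word(pp.nums) + '.' + pp.Word(pp.nums))
strict = pp.Combine(pp.Word(pp.nums) + '.' + pp.Word(pp.nums)) | pp.Word(pp.nums)
print(loose.parseString("3.1416"))   # -> ['3'] (integer alternative fires first)
print(strict.parseString("3.1416"))  # -> ['3.1416']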
- - Example:: - - color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN") - shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON") - integer = Word(nums) - shape_attr = "shape:" + shape_type("shape") - posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn") - color_attr = "color:" + color("color") - size_attr = "size:" + integer("size") - - # use Each (using operator '&') to accept attributes in any order - # (shape and posn are required, color and size are optional) - shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr) - - shape_spec.runTests(''' - shape: SQUARE color: BLACK posn: 100, 120 - shape: CIRCLE size: 50 color: BLUE posn: 50,80 - color:GREEN size:20 shape:TRIANGLE posn:20,40 - ''' - ) - - prints:: - - shape: SQUARE color: BLACK posn: 100, 120 - ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']] - - color: BLACK - - posn: ['100', ',', '120'] - - x: 100 - - y: 120 - - shape: SQUARE - - - shape: CIRCLE size: 50 color: BLUE posn: 50,80 - ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']] - - color: BLUE - - posn: ['50', ',', '80'] - - x: 50 - - y: 80 - - shape: CIRCLE - - size: 50 - - - color: GREEN size: 20 shape: TRIANGLE posn: 20,40 - ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']] - - color: GREEN - - posn: ['20', ',', '40'] - - x: 20 - - y: 40 - - shape: TRIANGLE - - size: 20 - """ - def __init__(self, exprs, savelist=True): - super(Each, self).__init__(exprs, savelist) - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - self.skipWhitespace = True - self.initExprGroups = True - self.saveAsList = True - - def streamline(self): - super(Each, self).streamline() - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - return self - - def parseImpl(self, instring, loc, doActions=True): - if self.initExprGroups: - self.opt1map = dict((id(e.expr), e) for e in self.exprs if isinstance(e, Optional)) - opt1 = [e.expr for e in self.exprs if isinstance(e, Optional)] - opt2 = [e for e in self.exprs if e.mayReturnEmpty and not isinstance(e, (Optional, Regex))] - self.optionals = opt1 + opt2 - self.multioptionals = [e.expr for e in self.exprs if isinstance(e, ZeroOrMore)] - self.multirequired = [e.expr for e in self.exprs if isinstance(e, OneOrMore)] - self.required = [e for e in self.exprs if not isinstance(e, (Optional, ZeroOrMore, OneOrMore))] - self.required += self.multirequired - self.initExprGroups = False - tmpLoc = loc - tmpReqd = self.required[:] - tmpOpt = self.optionals[:] - matchOrder = [] - - keepMatching = True - while keepMatching: - tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired - failed = [] - for e in tmpExprs: - try: - tmpLoc = e.tryParse(instring, tmpLoc) - except ParseException: - failed.append(e) - else: - matchOrder.append(self.opt1map.get(id(e), e)) - if e in tmpReqd: - tmpReqd.remove(e) - elif e in tmpOpt: - tmpOpt.remove(e) - if len(failed) == len(tmpExprs): - keepMatching = False - - if tmpReqd: - missing = ", ".join(_ustr(e) for e in tmpReqd) - raise ParseException(instring, loc, "Missing one or more required elements (%s)" % missing) - - # add any unmatched Optionals, in case they have default values defined - matchOrder += [e for e in self.exprs if isinstance(e, Optional) and e.expr in tmpOpt] - - resultlist = [] - for e in matchOrder: - loc, results = e._parse(instring, loc, doActions) - resultlist.append(results) - - finalResults = sum(resultlist, 
ParseResults([])) - return loc, finalResults - - def __str__(self): - if hasattr(self, "name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}" - - return self.strRepr - - def checkRecursion(self, parseElementList): - subRecCheckList = parseElementList[:] + [self] - for e in self.exprs: - e.checkRecursion(subRecCheckList) - - -class ParseElementEnhance(ParserElement): - """Abstract subclass of :class:`ParserElement`, for combining and - post-processing parsed tokens. - """ - def __init__(self, expr, savelist=False): - super(ParseElementEnhance, self).__init__(savelist) - if isinstance(expr, basestring): - if issubclass(self._literalStringClass, Token): - expr = self._literalStringClass(expr) - else: - expr = self._literalStringClass(Literal(expr)) - self.expr = expr - self.strRepr = None - if expr is not None: - self.mayIndexError = expr.mayIndexError - self.mayReturnEmpty = expr.mayReturnEmpty - self.setWhitespaceChars(expr.whiteChars) - self.skipWhitespace = expr.skipWhitespace - self.saveAsList = expr.saveAsList - self.callPreparse = expr.callPreparse - self.ignoreExprs.extend(expr.ignoreExprs) - - def parseImpl(self, instring, loc, doActions=True): - if self.expr is not None: - return self.expr._parse(instring, loc, doActions, callPreParse=False) - else: - raise ParseException("", loc, self.errmsg, self) - - def leaveWhitespace(self): - self.skipWhitespace = False - self.expr = self.expr.copy() - if self.expr is not None: - self.expr.leaveWhitespace() - return self - - def ignore(self, other): - if isinstance(other, Suppress): - if other not in self.ignoreExprs: - super(ParseElementEnhance, self).ignore(other) - if self.expr is not None: - self.expr.ignore(self.ignoreExprs[-1]) - else: - super(ParseElementEnhance, self).ignore(other) - if self.expr is not None: - self.expr.ignore(self.ignoreExprs[-1]) - return self - - def streamline(self): - super(ParseElementEnhance, self).streamline() - if self.expr is not None: - self.expr.streamline() - return self - - def checkRecursion(self, parseElementList): - if self in parseElementList: - raise RecursiveGrammarException(parseElementList + [self]) - subRecCheckList = parseElementList[:] + [self] - if self.expr is not None: - self.expr.checkRecursion(subRecCheckList) - - def validate(self, validateTrace=None): - if validateTrace is None: - validateTrace = [] - tmp = validateTrace[:] + [self] - if self.expr is not None: - self.expr.validate(tmp) - self.checkRecursion([]) - - def __str__(self): - try: - return super(ParseElementEnhance, self).__str__() - except Exception: - pass - - if self.strRepr is None and self.expr is not None: - self.strRepr = "%s:(%s)" % (self.__class__.__name__, _ustr(self.expr)) - return self.strRepr - - -class FollowedBy(ParseElementEnhance): - """Lookahead matching of the given parse expression. - ``FollowedBy`` does *not* advance the parsing position within - the input string, it only verifies that the specified parse - expression matches at the current position. ``FollowedBy`` - always returns a null token list. If any results names are defined - in the lookahead expression, those *will* be returned for access by - name. 
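# Small sketch (pyparsing assumed as `pp`; names are illustrative): Each ('&')
# accepts its required pieces in any order, which is what the matchOrder loop
# in Each.parseImpl above computes.
import pyparsing as pp

color = pp.Suppress("color:") + pp.Word(pp.alphas)("color")
size = pp.Suppress("size:") + pp.Word(pp.nums)("size")
spec = color & size
result = spec.parseString("size: 10 color: red")
print(result.color, result.size)  # -> red 10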
- - Example:: - - # use FollowedBy to match a label only if it is followed by a ':' - data_word = Word(alphas) - label = data_word + FollowedBy(':') - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) - - OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint() - - prints:: - - [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']] - """ - def __init__(self, expr): - super(FollowedBy, self).__init__(expr) - self.mayReturnEmpty = True - - def parseImpl(self, instring, loc, doActions=True): - # by using self._expr.parse and deleting the contents of the returned ParseResults list - # we keep any named results that were defined in the FollowedBy expression - _, ret = self.expr._parse(instring, loc, doActions=doActions) - del ret[:] - - return loc, ret - - -class PrecededBy(ParseElementEnhance): - """Lookbehind matching of the given parse expression. - ``PrecededBy`` does not advance the parsing position within the - input string, it only verifies that the specified parse expression - matches prior to the current position. ``PrecededBy`` always - returns a null token list, but if a results name is defined on the - given expression, it is returned. - - Parameters: - - - expr - expression that must match prior to the current parse - location - - retreat - (default= ``None``) - (int) maximum number of characters - to lookbehind prior to the current parse location - - If the lookbehind expression is a string, Literal, Keyword, or - a Word or CharsNotIn with a specified exact or maximum length, then - the retreat parameter is not required. Otherwise, retreat must be - specified to give a maximum number of characters to look back from - the current parse position for a lookbehind match. 
- - Example:: - - # VB-style variable names with type prefixes - int_var = PrecededBy("#") + pyparsing_common.identifier - str_var = PrecededBy("$") + pyparsing_common.identifier - - """ - def __init__(self, expr, retreat=None): - super(PrecededBy, self).__init__(expr) - self.expr = self.expr().leaveWhitespace() - self.mayReturnEmpty = True - self.mayIndexError = False - self.exact = False - if isinstance(expr, str): - retreat = len(expr) - self.exact = True - elif isinstance(expr, (Literal, Keyword)): - retreat = expr.matchLen - self.exact = True - elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT: - retreat = expr.maxLen - self.exact = True - elif isinstance(expr, _PositionToken): - retreat = 0 - self.exact = True - self.retreat = retreat - self.errmsg = "not preceded by " + str(expr) - self.skipWhitespace = False - self.parseAction.append(lambda s, l, t: t.__delitem__(slice(None, None))) - - def parseImpl(self, instring, loc=0, doActions=True): - if self.exact: - if loc < self.retreat: - raise ParseException(instring, loc, self.errmsg) - start = loc - self.retreat - _, ret = self.expr._parse(instring, start) - else: - # retreat specified a maximum lookbehind window, iterate - test_expr = self.expr + StringEnd() - instring_slice = instring[max(0, loc - self.retreat):loc] - last_expr = ParseException(instring, loc, self.errmsg) - for offset in range(1, min(loc, self.retreat + 1)+1): - try: - # print('trying', offset, instring_slice, repr(instring_slice[loc - offset:])) - _, ret = test_expr._parse(instring_slice, len(instring_slice) - offset) - except ParseBaseException as pbe: - last_expr = pbe - else: - break - else: - raise last_expr - return loc, ret - - -class NotAny(ParseElementEnhance): - """Lookahead to disallow matching with the given parse expression. - ``NotAny`` does *not* advance the parsing position within the - input string, it only verifies that the specified parse expression - does *not* match at the current position. Also, ``NotAny`` does - *not* skip over leading whitespace. ``NotAny`` always returns - a null token list. May be constructed using the '~' operator. - - Example:: - - AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split()) - - # take care not to mistake keywords for identifiers - ident = ~(AND | OR | NOT) + Word(alphas) - boolean_term = Optional(NOT) + ident - - # very crude boolean expression - to support parenthesis groups and - # operation hierarchy, use infixNotation - boolean_expr = boolean_term + ZeroOrMore((AND | OR) + boolean_term) - - # integers that are followed by "." 
are actually floats - integer = Word(nums) + ~Char(".") - """ - def __init__(self, expr): - super(NotAny, self).__init__(expr) - # ~ self.leaveWhitespace() - self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs - self.mayReturnEmpty = True - self.errmsg = "Found unwanted token, " + _ustr(self.expr) - - def parseImpl(self, instring, loc, doActions=True): - if self.expr.canParseNext(instring, loc): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - - def __str__(self): - if hasattr(self, "name"): - return self.name - - if self.strRepr is None: - self.strRepr = "~{" + _ustr(self.expr) + "}" - - return self.strRepr - -class _MultipleMatch(ParseElementEnhance): - def __init__(self, expr, stopOn=None): - super(_MultipleMatch, self).__init__(expr) - self.saveAsList = True - ender = stopOn - if isinstance(ender, basestring): - ender = self._literalStringClass(ender) - self.stopOn(ender) - - def stopOn(self, ender): - if isinstance(ender, basestring): - ender = self._literalStringClass(ender) - self.not_ender = ~ender if ender is not None else None - return self - - def parseImpl(self, instring, loc, doActions=True): - self_expr_parse = self.expr._parse - self_skip_ignorables = self._skipIgnorables - check_ender = self.not_ender is not None - if check_ender: - try_not_ender = self.not_ender.tryParse - - # must be at least one (but first see if we are the stopOn sentinel; - # if so, fail) - if check_ender: - try_not_ender(instring, loc) - loc, tokens = self_expr_parse(instring, loc, doActions, callPreParse=False) - try: - hasIgnoreExprs = (not not self.ignoreExprs) - while 1: - if check_ender: - try_not_ender(instring, loc) - if hasIgnoreExprs: - preloc = self_skip_ignorables(instring, loc) - else: - preloc = loc - loc, tmptokens = self_expr_parse(instring, preloc, doActions) - if tmptokens or tmptokens.haskeys(): - tokens += tmptokens - except (ParseException, IndexError): - pass - - return loc, tokens - - def _setResultsName(self, name, listAllMatches=False): - if __diag__.warn_ungrouped_named_tokens_in_collection: - for e in [self.expr] + getattr(self.expr, 'exprs', []): - if isinstance(e, ParserElement) and e.resultsName: - warnings.warn("{0}: setting results name {1!r} on {2} expression " - "collides with {3!r} on contained expression".format("warn_ungrouped_named_tokens_in_collection", - name, - type(self).__name__, - e.resultsName), - stacklevel=3) - - return super(_MultipleMatch, self)._setResultsName(name, listAllMatches) - - -class OneOrMore(_MultipleMatch): - """Repetition of one or more of the given expression. - - Parameters: - - expr - expression that must match one or more times - - stopOn - (default= ``None``) - expression for a terminating sentinel - (only required if the sentinel would ordinarily match the repetition - expression) - - Example:: - - data_word = Word(alphas) - label = data_word + FollowedBy(':') - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join)) - - text = "shape: SQUARE posn: upper left color: BLACK" - OneOrMore(attr_expr).parseString(text).pprint() # Fail! 
read 'color' as data instead of next label -> [['shape', 'SQUARE color']] - - # use stopOn attribute for OneOrMore to avoid reading label string as part of the data - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) - OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']] - - # could also be written as - (attr_expr * (1,)).parseString(text).pprint() - """ - - def __str__(self): - if hasattr(self, "name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + _ustr(self.expr) + "}..." - - return self.strRepr - -class ZeroOrMore(_MultipleMatch): - """Optional repetition of zero or more of the given expression. - - Parameters: - - expr - expression that must match zero or more times - - stopOn - (default= ``None``) - expression for a terminating sentinel - (only required if the sentinel would ordinarily match the repetition - expression) - - Example: similar to :class:`OneOrMore` - """ - def __init__(self, expr, stopOn=None): - super(ZeroOrMore, self).__init__(expr, stopOn=stopOn) - self.mayReturnEmpty = True - - def parseImpl(self, instring, loc, doActions=True): - try: - return super(ZeroOrMore, self).parseImpl(instring, loc, doActions) - except (ParseException, IndexError): - return loc, [] - - def __str__(self): - if hasattr(self, "name"): - return self.name - - if self.strRepr is None: - self.strRepr = "[" + _ustr(self.expr) + "]..." - - return self.strRepr - - -class _NullToken(object): - def __bool__(self): - return False - __nonzero__ = __bool__ - def __str__(self): - return "" - -class Optional(ParseElementEnhance): - """Optional matching of the given expression. - - Parameters: - - expr - expression that may match at most once - - default (optional) - value to be returned if the optional expression is not found. - - Example:: - - # US postal code can be a 5-digit zip, plus optional 4-digit qualifier - zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4))) - zip.runTests(''' - # traditional ZIP code - 12345 - - # ZIP+4 form - 12101-0001 - - # invalid ZIP - 98765- - ''') - - prints:: - - # traditional ZIP code - 12345 - ['12345'] - - # ZIP+4 form - 12101-0001 - ['12101-0001'] - - # invalid ZIP - 98765- - ^ - FAIL: Expected end of text (at char 5), (line:1, col:6) - """ - __optionalNotMatched = _NullToken() - - def __init__(self, expr, default=__optionalNotMatched): - super(Optional, self).__init__(expr, savelist=False) - self.saveAsList = self.expr.saveAsList - self.defaultValue = default - self.mayReturnEmpty = True - - def parseImpl(self, instring, loc, doActions=True): - try: - loc, tokens = self.expr._parse(instring, loc, doActions, callPreParse=False) - except (ParseException, IndexError): - if self.defaultValue is not self.__optionalNotMatched: - if self.expr.resultsName: - tokens = ParseResults([self.defaultValue]) - tokens[self.expr.resultsName] = self.defaultValue - else: - tokens = [self.defaultValue] - else: - tokens = [] - return loc, tokens - - def __str__(self): - if hasattr(self, "name"): - return self.name - - if self.strRepr is None: - self.strRepr = "[" + _ustr(self.expr) + "]" - - return self.strRepr - -class SkipTo(ParseElementEnhance): - """Token for skipping over all undefined text until the matched - expression is found.
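# Quick sketch (pyparsing assumed as `pp`): Optional with a default supplies a
# stand-in token when the wrapped expression is absent, per the parseImpl above.
import pyparsing as pp

entry = pp.Word(pp.alphas) + pp.Optional(pp.Word(pp.nums), default='0')
print(entry.parseString("item 42"))  # -> ['item', '42']
print(entry.parseString("item"))     # -> ['item', '0'] (default fills the gap)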
- - Parameters: - expr - target expression marking the end of the data to be skipped - include - (default= ``False``) if True, the target expression is also parsed - (the skipped text and target expression are returned as a 2-element list). - ignore - (default= ``None``) used to define grammars (typically quoted strings and - comments) that might contain false matches to the target expression - failOn - (default= ``None``) define expressions that are not allowed to be - included in the skipped text; if found before the target expression is found, - the SkipTo is not a match - - Example:: - - report = ''' - Outstanding Issues Report - 1 Jan 2000 - - # | Severity | Description | Days Open - -----+----------+-------------------------------------------+----------- - 101 | Critical | Intermittent system crash | 6 - 94 | Cosmetic | Spelling error on Login ('log|n') | 14 - 79 | Minor | System slow when running too many reports | 47 - ''' - integer = Word(nums) - SEP = Suppress('|') - # use SkipTo to simply match everything up until the next SEP - # - ignore quoted strings, so that a '|' character inside a quoted string does not match - # - parse action will call token.strip() for each matched token, i.e., the description body - string_data = SkipTo(SEP, ignore=quotedString) - string_data.setParseAction(tokenMap(str.strip)) - ticket_expr = (integer("issue_num") + SEP - + string_data("sev") + SEP - + string_data("desc") + SEP - + integer("days_open")) - - for tkt in ticket_expr.searchString(report): - print(tkt.dump()) - - prints:: - - ['101', 'Critical', 'Intermittent system crash', '6'] - - days_open: 6 - - desc: Intermittent system crash - - issue_num: 101 - - sev: Critical - ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14'] - - days_open: 14 - - desc: Spelling error on Login ('log|n') - - issue_num: 94 - - sev: Cosmetic - ['79', 'Minor', 'System slow when running too many reports', '47'] - - days_open: 47 - - desc: System slow when running too many reports - - issue_num: 79 - - sev: Minor - """ - def __init__(self, other, include=False, ignore=None, failOn=None): - super(SkipTo, self).__init__(other) - self.ignoreExpr = ignore - self.mayReturnEmpty = True - self.mayIndexError = False - self.includeMatch = include - self.saveAsList = False - if isinstance(failOn, basestring): - self.failOn = self._literalStringClass(failOn) - else: - self.failOn = failOn - self.errmsg = "No match found for " + _ustr(self.expr) - - def parseImpl(self, instring, loc, doActions=True): - startloc = loc - instrlen = len(instring) - expr = self.expr - expr_parse = self.expr._parse - self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None - self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None - - tmploc = loc - while tmploc <= instrlen: - if self_failOn_canParseNext is not None: - # break if failOn expression matches - if self_failOn_canParseNext(instring, tmploc): - break - - if self_ignoreExpr_tryParse is not None: - # advance past ignore expressions - while 1: - try: - tmploc = self_ignoreExpr_tryParse(instring, tmploc) - except ParseBaseException: - break - - try: - expr_parse(instring, tmploc, doActions=False, callPreParse=False) - except (ParseException, IndexError): - # no match, advance loc in string - tmploc += 1 - else: - # matched skipto expr, done - break - - else: - # ran off the end of the input string without matching skipto expr, fail - raise ParseException(instring, loc, self.errmsg, self) - - # build up return
values - loc = tmploc - skiptext = instring[startloc:loc] - skipresult = ParseResults(skiptext) - - if self.includeMatch: - loc, mat = expr_parse(instring, loc, doActions, callPreParse=False) - skipresult += mat - - return loc, skipresult - -class Forward(ParseElementEnhance): - """Forward declaration of an expression to be defined later - - used for recursive grammars, such as algebraic infix notation. - When the expression is known, it is assigned to the ``Forward`` - variable using the '<<' operator. - - Note: take care when assigning to ``Forward`` not to overlook - precedence of operators. - - Specifically, '|' has a lower precedence than '<<', so that:: - - fwdExpr << a | b | c - - will actually be evaluated as:: - - (fwdExpr << a) | b | c - - thereby leaving b and c out as parseable alternatives. It is recommended that you - explicitly group the values inserted into the ``Forward``:: - - fwdExpr << (a | b | c) - - Converting to use the '<<=' operator instead will avoid this problem. - - See :class:`ParseResults.pprint` for an example of a recursive - parser created using ``Forward``. - """ - def __init__(self, other=None): - super(Forward, self).__init__(other, savelist=False) - - def __lshift__(self, other): - if isinstance(other, basestring): - other = self._literalStringClass(other) - self.expr = other - self.strRepr = None - self.mayIndexError = self.expr.mayIndexError - self.mayReturnEmpty = self.expr.mayReturnEmpty - self.setWhitespaceChars(self.expr.whiteChars) - self.skipWhitespace = self.expr.skipWhitespace - self.saveAsList = self.expr.saveAsList - self.ignoreExprs.extend(self.expr.ignoreExprs) - return self - - def __ilshift__(self, other): - return self << other - - def leaveWhitespace(self): - self.skipWhitespace = False - return self - - def streamline(self): - if not self.streamlined: - self.streamlined = True - if self.expr is not None: - self.expr.streamline() - return self - - def validate(self, validateTrace=None): - if validateTrace is None: - validateTrace = [] - - if self not in validateTrace: - tmp = validateTrace[:] + [self] - if self.expr is not None: - self.expr.validate(tmp) - self.checkRecursion([]) - - def __str__(self): - if hasattr(self, "name"): - return self.name - if self.strRepr is not None: - return self.strRepr - - # Avoid infinite recursion by setting a temporary strRepr - self.strRepr = ": ..." - - # Use the string representation of main expression. - retString = '...' - try: - if self.expr is not None: - retString = _ustr(self.expr)[:1000] - else: - retString = "None" - finally: - self.strRepr = self.__class__.__name__ + ": " + retString - return self.strRepr - - def copy(self): - if self.expr is not None: - return super(Forward, self).copy() - else: - ret = Forward() - ret <<= self - return ret - - def _setResultsName(self, name, listAllMatches=False): - if __diag__.warn_name_set_on_empty_Forward: - if self.expr is None: - warnings.warn("{0}: setting results name {1!r} on {2} expression " - "that has no contained expression".format("warn_name_set_on_empty_Forward", - name, - type(self).__name__), - stacklevel=3) - - return super(Forward, self)._setResultsName(name, listAllMatches) - -class TokenConverter(ParseElementEnhance): - """ - Abstract subclass of :class:`ParseExpression`, for converting parsed results.
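# Minimal recursive-grammar sketch (standalone `pyparsing` assumed as `pp`),
# using the '<<=' form recommended in the Forward docstring above to avoid
# the operator-precedence pitfall it describes.
import pyparsing as pp

expr = pp.Forward()
atom = pp.Word(pp.nums) | pp.Group(pp.Suppress('(') + expr + pp.Suppress(')'))
expr <<= atom + pp.ZeroOrMore(pp.oneOf('+ -') + atom)
print(expr.parseString("(1+2)+3"))  # -> [['1', '+', '2'], '+', '3']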
- """ - def __init__(self, expr, savelist=False): - super(TokenConverter, self).__init__(expr) # , savelist) - self.saveAsList = False - -class Combine(TokenConverter): - """Converter to concatenate all matching tokens to a single string. - By default, the matching patterns must also be contiguous in the - input string; this can be disabled by specifying - ``'adjacent=False'`` in the constructor. - - Example:: - - real = Word(nums) + '.' + Word(nums) - print(real.parseString('3.1416')) # -> ['3', '.', '1416'] - # will also erroneously match the following - print(real.parseString('3. 1416')) # -> ['3', '.', '1416'] - - real = Combine(Word(nums) + '.' + Word(nums)) - print(real.parseString('3.1416')) # -> ['3.1416'] - # no match when there are internal spaces - print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...) - """ - def __init__(self, expr, joinString="", adjacent=True): - super(Combine, self).__init__(expr) - # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself - if adjacent: - self.leaveWhitespace() - self.adjacent = adjacent - self.skipWhitespace = True - self.joinString = joinString - self.callPreparse = True - - def ignore(self, other): - if self.adjacent: - ParserElement.ignore(self, other) - else: - super(Combine, self).ignore(other) - return self - - def postParse(self, instring, loc, tokenlist): - retToks = tokenlist.copy() - del retToks[:] - retToks += ParseResults(["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults) - - if self.resultsName and retToks.haskeys(): - return [retToks] - else: - return retToks - -class Group(TokenConverter): - """Converter to return the matched tokens as a list - useful for - returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions. - - Example:: - - ident = Word(alphas) - num = Word(nums) - term = ident | num - func = ident + Optional(delimitedList(term)) - print(func.parseString("fn a, b, 100")) # -> ['fn', 'a', 'b', '100'] - - func = ident + Group(Optional(delimitedList(term))) - print(func.parseString("fn a, b, 100")) # -> ['fn', ['a', 'b', '100']] - """ - def __init__(self, expr): - super(Group, self).__init__(expr) - self.saveAsList = True - - def postParse(self, instring, loc, tokenlist): - return [tokenlist] - -class Dict(TokenConverter): - """Converter to return a repetitive expression as a list, but also - as a dictionary. Each element can also be referenced using the first - token in the expression as its key. Useful for tabular report - scraping when the first column can be used as a item key. 
- - Example:: - - data_word = Word(alphas) - label = data_word + FollowedBy(':') - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join)) - - text = "shape: SQUARE posn: upper left color: light blue texture: burlap" - attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) - - # print attributes as plain groups - print(OneOrMore(attr_expr).parseString(text).dump()) - - # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names - result = Dict(OneOrMore(Group(attr_expr))).parseString(text) - print(result.dump()) - - # access named fields as dict entries, or output as dict - print(result['shape']) - print(result.asDict()) - - prints:: - - ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap'] - [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] - - color: light blue - - posn: upper left - - shape: SQUARE - - texture: burlap - SQUARE - {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'} - - See more examples at :class:`ParseResults` of accessing fields by results name. - """ - def __init__(self, expr): - super(Dict, self).__init__(expr) - self.saveAsList = True - - def postParse(self, instring, loc, tokenlist): - for i, tok in enumerate(tokenlist): - if len(tok) == 0: - continue - ikey = tok[0] - if isinstance(ikey, int): - ikey = _ustr(tok[0]).strip() - if len(tok) == 1: - tokenlist[ikey] = _ParseResultsWithOffset("", i) - elif len(tok) == 2 and not isinstance(tok[1], ParseResults): - tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i) - else: - dictvalue = tok.copy() # ParseResults(i) - del dictvalue[0] - if len(dictvalue) != 1 or (isinstance(dictvalue, ParseResults) and dictvalue.haskeys()): - tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i) - else: - tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i) - - if self.resultsName: - return [tokenlist] - else: - return tokenlist - - -class Suppress(TokenConverter): - """Converter for ignoring the results of a parsed expression. - - Example:: - - source = "a, b, c,d" - wd = Word(alphas) - wd_list1 = wd + ZeroOrMore(',' + wd) - print(wd_list1.parseString(source)) - - # often, delimiters that are useful during parsing are just in the - # way afterward - use Suppress to keep them out of the parsed output - wd_list2 = wd + ZeroOrMore(Suppress(',') + wd) - print(wd_list2.parseString(source)) - - prints:: - - ['a', ',', 'b', ',', 'c', ',', 'd'] - ['a', 'b', 'c', 'd'] - - (See also :class:`delimitedList`.) - """ - def postParse(self, instring, loc, tokenlist): - return [] - - def suppress(self): - return self - - -class OnlyOnce(object): - """Wrapper for parse actions, to ensure they are only called once. - """ - def __init__(self, methodCall): - self.callable = _trim_arity(methodCall) - self.called = False - def __call__(self, s, l, t): - if not self.called: - results = self.callable(s, l, t) - self.called = True - return results - raise ParseException(s, l, "") - def reset(self): - self.called = False - -def traceParseAction(f): - """Decorator for debugging parse actions. - - When the parse action is called, this decorator will print - ``">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)"``. - When the parse action completes, the decorator will print - ``"<<"`` followed by the returned value, or any exception that the parse action raised. 
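# Short sketch (pyparsing assumed as `pp`): OnlyOnce, defined above, arms a
# parse action for a single call; reset() re-arms it for reuse.
import pyparsing as pp

once = pp.OnlyOnce(lambda t: t)
wd = pp.Word(pp.alphas).setParseAction(once)
print(wd.parseString("hello"))  # -> ['hello']
once.reset()                    # without this, the next call would raise ParseException
print(wd.parseString("world"))  # -> ['world']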
- - Example:: - - wd = Word(alphas) - - @traceParseAction - def remove_duplicate_chars(tokens): - return ''.join(sorted(set(''.join(tokens)))) - - wds = OneOrMore(wd).setParseAction(remove_duplicate_chars) - print(wds.parseString("slkdjs sld sldd sdlf sdljf")) - - prints:: - - >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {})) - <<leaving remove_duplicate_chars (ret: 'dfjkls') - ['dfjkls'] - """ - f = _trim_arity(f) - def z(*paArgs): - thisFunc = f.__name__ - s, l, t = paArgs[-3:] - if len(paArgs) > 3: - thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc - sys.stderr.write(">>entering %s(line: '%s', %d, %r)\n" % (thisFunc, line(l, s), l, t)) - try: - ret = f(*paArgs) - except Exception as exc: - sys.stderr.write("<<leaving %s (exception: %s)\n" % (thisFunc, exc)) - raise - sys.stderr.write("<<leaving %s (ret: %r)\n" % (thisFunc, ret)) - return ret - try: - z.__name__ = f.__name__ - except AttributeError: - pass - return z - -# -# global helpers -# -def delimitedList(expr, delim=",", combine=False): - """Helper to define a delimited list of expressions - the delimiter - defaults to ','. By default, the list elements and delimiters can - have intervening whitespace, and comments, but this can be - overridden by passing ``combine=True`` in the constructor. If - ``combine`` is set to ``True``, the matching tokens are - returned as a single token string, with the delimiters included; - otherwise, the matching tokens are returned as a list of tokens, - with the delimiters suppressed. - - Example:: - - delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc'] - delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE'] - """ - dlName = _ustr(expr) + " [" + _ustr(delim) + " " + _ustr(expr) + "]..." - if combine: - return Combine(expr + ZeroOrMore(delim + expr)).setName(dlName) - else: - return (expr + ZeroOrMore(Suppress(delim) + expr)).setName(dlName) - -def countedArray(expr, intExpr=None): - """Helper to define a counted list of expressions. - - This helper defines a pattern of the form:: - - integer expr expr expr... - - where the leading integer tells how many expr expressions follow. - The matched tokens returns the array of expr tokens as a list - the - leading count token is suppressed. - - If ``intExpr`` is specified, it should be a pyparsing expression - that produces an integer value. 
- - Example:: - - countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd'] - - # in this parser, the leading integer value is given in binary, - # '10' indicating that 2 values are in the array - binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2)) - countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd'] - """ - arrayExpr = Forward() - def countFieldParseAction(s, l, t): - n = t[0] - arrayExpr << (n and Group(And([expr] * n)) or Group(empty)) - return [] - if intExpr is None: - intExpr = Word(nums).setParseAction(lambda t: int(t[0])) - else: - intExpr = intExpr.copy() - intExpr.setName("arrayLen") - intExpr.addParseAction(countFieldParseAction, callDuringTry=True) - return (intExpr + arrayExpr).setName('(len) ' + _ustr(expr) + '...') - -def _flatten(L): - ret = [] - for i in L: - if isinstance(i, list): - ret.extend(_flatten(i)) - else: - ret.append(i) - return ret - -def matchPreviousLiteral(expr): - """Helper to define an expression that is indirectly defined from - the tokens matched in a previous expression, that is, it looks for - a 'repeat' of a previous expression. For example:: - - first = Word(nums) - second = matchPreviousLiteral(first) - matchExpr = first + ":" + second - - will match ``"1:1"``, but not ``"1:2"``. Because this - matches a previous literal, it will also match the leading - ``"1:1"`` in ``"1:10"``. If this is not desired, use - :class:`matchPreviousExpr`. Do *not* use with packrat parsing - enabled. - """ - rep = Forward() - def copyTokenToRepeater(s, l, t): - if t: - if len(t) == 1: - rep << t[0] - else: - # flatten t tokens - tflat = _flatten(t.asList()) - rep << And(Literal(tt) for tt in tflat) - else: - rep << Empty() - expr.addParseAction(copyTokenToRepeater, callDuringTry=True) - rep.setName('(prev) ' + _ustr(expr)) - return rep - -def matchPreviousExpr(expr): - """Helper to define an expression that is indirectly defined from - the tokens matched in a previous expression, that is, it looks for - a 'repeat' of a previous expression. For example:: - - first = Word(nums) - second = matchPreviousExpr(first) - matchExpr = first + ":" + second - - will match ``"1:1"``, but not ``"1:2"``. Because this - matches by expressions, it will *not* match the leading ``"1:1"`` - in ``"1:10"``; the expressions are evaluated first, and then - compared, so ``"1"`` is compared with ``"10"``. Do *not* use - with packrat parsing enabled. - """ - rep = Forward() - e2 = expr.copy() - rep <<= e2 - def copyTokenToRepeater(s, l, t): - matchTokens = _flatten(t.asList()) - def mustMatchTheseTokens(s, l, t): - theseTokens = _flatten(t.asList()) - if theseTokens != matchTokens: - raise ParseException('', 0, '') - rep.setParseAction(mustMatchTheseTokens, callDuringTry=True) - expr.addParseAction(copyTokenToRepeater, callDuringTry=True) - rep.setName('(prev) ' + _ustr(expr)) - return rep - -def _escapeRegexRangeChars(s): - # ~ escape these chars: ^-[] - for c in r"\^-[]": - s = s.replace(c, _bslash + c) - s = s.replace("\n", r"\n") - s = s.replace("\t", r"\t") - return _ustr(s) - -def oneOf(strs, caseless=False, useRegex=True, asKeyword=False): - """Helper to quickly define a set of alternative Literals, and makes - sure to do longest-first testing when there is a conflict, - regardless of the input order, but returns - a :class:`MatchFirst` for best performance.
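# Small sketch (pyparsing assumed as `pp`): matchPreviousExpr, defined above,
# re-matches whatever the earlier expression produced, useful for begin/end pairs.
import pyparsing as pp

first = pp.Word(pp.nums)
second = pp.matchPreviousExpr(first)
pair = first + ':' + second
print(pair.parseString("12:12"))  # -> ['12', ':', '12']; "12:34" would fail to parse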
- - Parameters: - - - strs - a string of space-delimited literals, or a collection of - string literals - - caseless - (default= ``False``) - treat all literals as - caseless - - useRegex - (default= ``True``) - as an optimization, will - generate a Regex object; otherwise, will generate - a :class:`MatchFirst` object (if ``caseless=True`` or ``asKeyword=True``, or if - creating a :class:`Regex` raises an exception) - - asKeyword - (default=``False``) - enforce Keyword-style matching on the - generated expressions - - Example:: - - comp_oper = oneOf("< = > <= >= !=") - var = Word(alphas) - number = Word(nums) - term = var | number - comparison_expr = term + comp_oper + term - print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12")) - - prints:: - - [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']] - """ - if isinstance(caseless, basestring): - warnings.warn("More than one string argument passed to oneOf, pass " - "choices as a list or space-delimited string", stacklevel=2) - - if caseless: - isequal = (lambda a, b: a.upper() == b.upper()) - masks = (lambda a, b: b.upper().startswith(a.upper())) - parseElementClass = CaselessKeyword if asKeyword else CaselessLiteral - else: - isequal = (lambda a, b: a == b) - masks = (lambda a, b: b.startswith(a)) - parseElementClass = Keyword if asKeyword else Literal - - symbols = [] - if isinstance(strs, basestring): - symbols = strs.split() - elif isinstance(strs, Iterable): - symbols = list(strs) - else: - warnings.warn("Invalid argument to oneOf, expected string or iterable", - SyntaxWarning, stacklevel=2) - if not symbols: - return NoMatch() - - if not asKeyword: - # if not producing keywords, need to reorder to take care to avoid masking - # longer choices with shorter ones - i = 0 - while i < len(symbols) - 1: - cur = symbols[i] - for j, other in enumerate(symbols[i + 1:]): - if isequal(other, cur): - del symbols[i + j + 1] - break - elif masks(cur, other): - del symbols[i + j + 1] - symbols.insert(i, other) - break - else: - i += 1 - - if not (caseless or asKeyword) and useRegex: - # ~ print (strs, "->", "|".join([_escapeRegexChars(sym) for sym in symbols])) - try: - if len(symbols) == len("".join(symbols)): - return Regex("[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols)).setName(' | '.join(symbols)) - else: - return Regex("|".join(re.escape(sym) for sym in symbols)).setName(' | '.join(symbols)) - except Exception: - warnings.warn("Exception creating Regex for oneOf, building MatchFirst", - SyntaxWarning, stacklevel=2) - - # last resort, just use MatchFirst - return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols)) - -def dictOf(key, value): - """Helper to easily and clearly define a dictionary by specifying - the respective patterns for the key and value. Takes care of - defining the :class:`Dict`, :class:`ZeroOrMore`, and - :class:`Group` tokens in the proper order. The key pattern - can include delimiting markers or punctuation, as long as they are - suppressed, thereby leaving the significant key text. The value - pattern can include named results, so that the :class:`Dict` results - can include named token fields. 
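# Sketch (pyparsing assumed as `pp`): oneOf reorders its symbols longest-first,
# exactly as the masking loop above does, so '<=' is not shadowed by '<'.
import pyparsing as pp

comp = pp.oneOf("< <= > >=")
print(comp.parseString("<="))  # -> ['<=']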
- - Example:: - - text = "shape: SQUARE posn: upper left color: light blue texture: burlap" - attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) - print(OneOrMore(attr_expr).parseString(text).dump()) - - attr_label = label - attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join) - - # similar to Dict, but simpler call format - result = dictOf(attr_label, attr_value).parseString(text) - print(result.dump()) - print(result['shape']) - print(result.shape) # object attribute access works too - print(result.asDict()) - - prints:: - - [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] - - color: light blue - - posn: upper left - - shape: SQUARE - - texture: burlap - SQUARE - SQUARE - {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'} - """ - return Dict(OneOrMore(Group(key + value))) - -def originalTextFor(expr, asString=True): - """Helper to return the original, untokenized text for a given - expression. Useful to restore the parsed fields of an HTML start - tag into the raw tag text itself, or to revert separate tokens with - intervening whitespace back to the original matching input text. By - default, returns a string containing the original parsed text. - - If the optional ``asString`` argument is passed as - ``False``, then the return value is - a :class:`ParseResults` containing any results names that - were originally matched, and a single token containing the original - matched text from the input string. So if the expression passed to - :class:`originalTextFor` contains expressions with defined - results names, you must set ``asString`` to ``False`` if you - want to preserve those results name values. - - Example:: - - src = "this is test <b> bold <i>text</i> </b> normal text " - for tag in ("b", "i"): - opener, closer = makeHTMLTags(tag) - patt = originalTextFor(opener + SkipTo(closer) + closer) - print(patt.searchString(src)[0]) - - prints:: - - ['<b> bold <i>text</i> </b>'] - ['<i>text</i>'] - """ - locMarker = Empty().setParseAction(lambda s, loc, t: loc) - endlocMarker = locMarker.copy() - endlocMarker.callPreparse = False - matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end") - if asString: - extractText = lambda s, l, t: s[t._original_start: t._original_end] - else: - def extractText(s, l, t): - t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]] - matchExpr.setParseAction(extractText) - matchExpr.ignoreExprs = expr.ignoreExprs - return matchExpr - -def ungroup(expr): - """Helper to undo pyparsing's default grouping of And expressions, - even if all but one are non-empty. - """ - return TokenConverter(expr).addParseAction(lambda t: t[0]) - -def locatedExpr(expr): - """Helper to decorate a returned token with its starting and ending - locations in the input string.
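# Sketch (pyparsing assumed as `pp`): ungroup, defined above, peels one level
# of Group-ing off the result.
import pyparsing as pp

grouped = pp.Group(pp.Word(pp.alphas))
print(grouped.parseString("abc"))              # -> [['abc']]
print(pp.ungroup(grouped).parseString("abc"))  # -> ['abc']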
- - This helper adds the following results names: - - - locn_start = location where matched expression begins - - locn_end = location where matched expression ends - - value = the actual parsed results - - Be careful if the input text contains ``<TAB>`` characters, you - may want to call :class:`ParserElement.parseWithTabs` - - Example:: - - wd = Word(alphas) - for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"): - print(match) - - prints:: - - [[0, 'ljsdf', 5]] - [[8, 'lksdjjf', 15]] - [[18, 'lkkjj', 23]] - """ - locator = Empty().setParseAction(lambda s, l, t: l) - return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end")) - - -# convenience constants for positional expressions -empty = Empty().setName("empty") -lineStart = LineStart().setName("lineStart") -lineEnd = LineEnd().setName("lineEnd") -stringStart = StringStart().setName("stringStart") -stringEnd = StringEnd().setName("stringEnd") - -_escapedPunc = Word(_bslash, r"\[]-*.$+^?()~ ", exact=2).setParseAction(lambda s, l, t: t[0][1]) -_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s, l, t: unichr(int(t[0].lstrip(r'\0x'), 16))) -_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s, l, t: unichr(int(t[0][1:], 8))) -_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r'\]', exact=1) -_charRange = Group(_singleChar + Suppress("-") + _singleChar) -_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group(OneOrMore(_charRange | _singleChar)).setResultsName("body") + "]" - -def srange(s): - r"""Helper to easily define string ranges for use in Word - construction. Borrows syntax from regexp '[]' string range - definitions:: - - srange("[0-9]") -> "0123456789" - srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz" - srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_" - - The input string must be enclosed in []'s, and the returned string - is the expanded character set joined into a single string. The - values enclosed in the []'s may be: - - - a single character - - an escaped character with a leading backslash (such as ``\-`` - or ``\]``) - - an escaped hex character with a leading ``'\x'`` - (``\x21``, which is a ``'!'`` character) (``\0x##`` - is also supported for backwards compatibility) - - an escaped octal character with a leading ``'\0'`` - (``\041``, which is a ``'!'`` character) - - a range of any of the above, separated by a dash (``'a-z'``, - etc.) - - any combination of the above (``'aeiouy'``, - ``'a-zA-Z0-9_$'``, etc.) - """ - _expanded = lambda p: p if not isinstance(p, ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]), ord(p[1]) + 1)) - try: - return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body) - except Exception: - return "" - -def matchOnlyAtCol(n): - """Helper method for defining parse actions that require matching at - a specific column in the input text. - """ - def verifyCol(strg, locn, toks): - if col(locn, strg) != n: - raise ParseException(strg, locn, "matched token not at column %d" % n) - return verifyCol - -def replaceWith(replStr): - """Helper method for common parse actions that simply return - a literal value. Especially useful when used with - :class:`transformString<ParserElement.transformString>` (). 
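# Sketch (pyparsing assumed as `pp`): srange, defined above, expands a
# regex-style character class into the literal character set that Word expects.
import pyparsing as pp

print(pp.srange("[a-f0-9]"))             # -> 'abcdef0123456789'
hex_word = pp.Word(pp.srange("[0-9a-fA-F]"))
print(hex_word.parseString("deadBEEF"))  # -> ['deadBEEF']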
- - Example:: - - num = Word(nums).setParseAction(lambda toks: int(toks[0])) - na = oneOf("N/A NA").setParseAction(replaceWith(math.nan)) - term = na | num - - OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234] - """ - return lambda s, l, t: [replStr] - -def removeQuotes(s, l, t): - """Helper parse action for removing quotation marks from parsed - quoted strings. - - Example:: - - # by default, quotation marks are included in parsed results - quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"] - - # use removeQuotes to strip quotation marks from parsed results - quotedString.setParseAction(removeQuotes) - quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"] - """ - return t[0][1:-1] - -def tokenMap(func, *args): - """Helper to define a parse action by mapping a function to all - elements of a ParseResults list. If any additional args are passed, - they are forwarded to the given function as additional arguments - after the token, as in - ``hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))``, - which will convert the parsed data to an integer using base 16. - - Example (compare the last example to the one in :class:`ParserElement.transformString`):: - - hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16)) - hex_ints.runTests(''' - 00 11 22 aa FF 0a 0d 1a - ''') - - upperword = Word(alphas).setParseAction(tokenMap(str.upper)) - OneOrMore(upperword).runTests(''' - my kingdom for a horse - ''') - - wd = Word(alphas).setParseAction(tokenMap(str.title)) - OneOrMore(wd).setParseAction(' '.join).runTests(''' - now is the winter of our discontent made glorious summer by this sun of york - ''') - - prints:: - - 00 11 22 aa FF 0a 0d 1a - [0, 17, 34, 170, 255, 10, 13, 26] - - my kingdom for a horse - ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE'] - - now is the winter of our discontent made glorious summer by this sun of york - ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York'] - """ - def pa(s, l, t): - return [func(tokn, *args) for tokn in t] - - try: - func_name = getattr(func, '__name__', - getattr(func, '__class__').__name__) - except Exception: - func_name = str(func) - pa.__name__ = func_name - - return pa - -upcaseTokens = tokenMap(lambda t: _ustr(t).upper()) -"""(Deprecated) Helper parse action to convert tokens to upper case. -Deprecated in favor of :class:`pyparsing_common.upcaseTokens`""" - -downcaseTokens = tokenMap(lambda t: _ustr(t).lower()) -"""(Deprecated) Helper parse action to convert tokens to lower case.
-Deprecated in favor of :class:`pyparsing_common.downcaseTokens`""" - -def _makeTags(tagStr, xml, - suppress_LT=Suppress("<"), - suppress_GT=Suppress(">")): - """Internal helper to construct opening and closing tag expressions, given a tag name""" - if isinstance(tagStr, basestring): - resname = tagStr - tagStr = Keyword(tagStr, caseless=not xml) - else: - resname = tagStr.name - - tagAttrName = Word(alphas, alphanums + "_-:") - if xml: - tagAttrValue = dblQuotedString.copy().setParseAction(removeQuotes) - openTag = (suppress_LT - + tagStr("tag") - + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue))) - + Optional("/", default=[False])("empty").setParseAction(lambda s, l, t: t[0] == '/') - + suppress_GT) - else: - tagAttrValue = quotedString.copy().setParseAction(removeQuotes) | Word(printables, excludeChars=">") - openTag = (suppress_LT - + tagStr("tag") - + Dict(ZeroOrMore(Group(tagAttrName.setParseAction(downcaseTokens) - + Optional(Suppress("=") + tagAttrValue)))) - + Optional("/", default=[False])("empty").setParseAction(lambda s, l, t: t[0] == '/') - + suppress_GT) - closeTag = Combine(_L("</") + tagStr + ">", adjacent=False) - - openTag.setName("<%s>" % resname) - # add start<tagname> results name in parse action now that ungrouped names are not reported at two levels - openTag.addParseAction(lambda t: t.__setitem__("start" + "".join(resname.replace(":", " ").title().split()), t.copy())) - closeTag = closeTag("end" + "".join(resname.replace(":", " ").title().split())).setName("</%s>" % resname) - openTag.tag = resname - closeTag.tag = resname - openTag.tag_body = SkipTo(closeTag()) - return openTag, closeTag - -def makeHTMLTags(tagStr): - """Helper to construct opening and closing tag expressions for HTML, - given a tag name. Matches tags in either upper or lower case, - attributes with namespaces and with quoted or unquoted values. - - Example:: - - text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>' - # makeHTMLTags returns pyparsing expressions for the opening and - # closing tags as a 2-tuple - a, a_end = makeHTMLTags("A") - link_expr = a + SkipTo(a_end)("link_text") + a_end - - for link in link_expr.searchString(text): - # attributes in the <A> tag (like "href" shown here) are - # also accessible as named results - print(link.link_text, '->', link.href) - - prints:: - - pyparsing -> https://github.com/pyparsing/pyparsing/wiki - """ - return _makeTags(tagStr, False) - -def makeXMLTags(tagStr): - """Helper to construct opening and closing tag expressions for XML, - given a tag name. Matches tags only in the given upper/lower case. - - Example: similar to :class:`makeHTMLTags` - """ - return _makeTags(tagStr, True) - -def withAttribute(*args, **attrDict): - """Helper to create a validating parse action to be used with start - tags created with :class:`makeXMLTags` or - :class:`makeHTMLTags`. Use ``withAttribute`` to qualify - a starting tag with a required attribute value, to avoid false - matches on common tags such as ``<TD>`` or ``<DIV>``. - - Call ``withAttribute`` with a series of attribute names and - values. 
Specify the filter attribute names and values as: - - - keyword arguments, as in ``(align="right")``, or - - as an explicit dict with ``**`` operator, when an attribute - name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}`` - - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))`` - - For attribute names with a namespace prefix, you must use the second - form. Attribute names are matched case-insensitively. - - If just testing for ``class`` (with or without a namespace), use - :class:`withClass`. - - To verify that the attribute exists, but without specifying a value, - pass ``withAttribute.ANY_VALUE`` as the value. - - Example:: - - html = ''' - <div> - Some text - <div type="grid">1 4 0 1 0</div> - <div type="graph">1,3 2,3 1,1</div> - <div>this has no type</div> - </div> - - ''' - div,div_end = makeHTMLTags("div") - - # only match div tag having a type attribute with value "grid" - div_grid = div().setParseAction(withAttribute(type="grid")) - grid_expr = div_grid + SkipTo(div | div_end)("body") - for grid_header in grid_expr.searchString(html): - print(grid_header.body) - - # construct a match with any div tag having a type attribute, regardless of the value - div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE)) - div_expr = div_any_type + SkipTo(div | div_end)("body") - for div_header in div_expr.searchString(html): - print(div_header.body) - - prints:: - - 1 4 0 1 0 - - 1 4 0 1 0 - 1,3 2,3 1,1 - """ - if args: - attrs = args[:] - else: - attrs = attrDict.items() - attrs = [(k, v) for k, v in attrs] - def pa(s, l, tokens): - for attrName, attrValue in attrs: - if attrName not in tokens: - raise ParseException(s, l, "no matching attribute " + attrName) - if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue: - raise ParseException(s, l, "attribute '%s' has value '%s', must be '%s'" % - (attrName, tokens[attrName], attrValue)) - return pa -withAttribute.ANY_VALUE = object() - -def withClass(classname, namespace=''): - """Simplified version of :class:`withAttribute` when - matching on a div class - made difficult because ``class`` is - a reserved word in Python. - - Example:: - - html = ''' - <div> - Some text - <div class="grid">1 4 0 1 0</div> - <div class="graph">1,3 2,3 1,1</div> - <div>this <div> has no class</div> - </div> - - ''' - div,div_end = makeHTMLTags("div") - div_grid = div().setParseAction(withClass("grid")) - - grid_expr = div_grid + SkipTo(div | div_end)("body") - for grid_header in grid_expr.searchString(html): - print(grid_header.body) - - div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE)) - div_expr = div_any_type + SkipTo(div | div_end)("body") - for div_header in div_expr.searchString(html): - print(div_header.body) - - prints:: - - 1 4 0 1 0 - - 1 4 0 1 0 - 1,3 2,3 1,1 - """ - classattr = "%s:class" % namespace if namespace else "class" - return withAttribute(**{classattr: classname}) - -opAssoc = SimpleNamespace() -opAssoc.LEFT = object() -opAssoc.RIGHT = object() - -def infixNotation(baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')')): - """Helper method for constructing grammars of expressions made up of - operators working in a precedence hierarchy. Operators may be unary - or binary, left- or right-associative. Parse actions can also be - attached to operator expressions. The generated parser will also - recognize the use of parentheses to override operator precedences - (see example below).
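
A compact sketch of the attribute-filter helpers above (standalone pyparsing 2.x assumed; the HTML snippet is made up)::

    from pyparsing import SkipTo, makeHTMLTags, withAttribute

    div, div_end = makeHTMLTags("div")
    # the parse action rejects any <div> whose type attribute is not "grid"
    div_grid = div.copy().setParseAction(withAttribute(type="grid"))
    grid_expr = div_grid + SkipTo(div_end)("body") + div_end
    html = '<div type="grid">1 4 0</div> <div type="graph">1,3</div>'
    for match in grid_expr.searchString(html):
        print(match.body)  # prints only: 1 4 0
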
- - Note: if you define a deep operator list, you may see performance - issues when using infixNotation. See - :class:`ParserElement.enablePackrat` for a mechanism to potentially - improve your parser performance. - - Parameters: - - baseExpr - expression representing the most basic element for the - nested - - opList - list of tuples, one for each operator precedence level - in the expression grammar; each tuple is of the form ``(opExpr, - numTerms, rightLeftAssoc, parseAction)``, where: - - - opExpr is the pyparsing expression for the operator; may also - be a string, which will be converted to a Literal; if numTerms - is 3, opExpr is a tuple of two expressions, for the two - operators separating the 3 terms - - numTerms is the number of terms for this operator (must be 1, - 2, or 3) - - rightLeftAssoc is the indicator whether the operator is right - or left associative, using the pyparsing-defined constants - ``opAssoc.RIGHT`` and ``opAssoc.LEFT``. - - parseAction is the parse action to be associated with - expressions matching this operator expression (the parse action - tuple member may be omitted); if the parse action is passed - a tuple or list of functions, this is equivalent to calling - ``setParseAction(*fn)`` - (:class:`ParserElement.setParseAction`) - - lpar - expression for matching left-parentheses - (default= ``Suppress('(')``) - - rpar - expression for matching right-parentheses - (default= ``Suppress(')')``) - - Example:: - - # simple example of four-function arithmetic with ints and - # variable names - integer = pyparsing_common.signed_integer - varname = pyparsing_common.identifier - - arith_expr = infixNotation(integer | varname, - [ - ('-', 1, opAssoc.RIGHT), - (oneOf('* /'), 2, opAssoc.LEFT), - (oneOf('+ -'), 2, opAssoc.LEFT), - ]) - - arith_expr.runTests(''' - 5+3*6 - (5+3)*6 - -2--11 - ''', fullDump=False) - - prints:: - - 5+3*6 - [[5, '+', [3, '*', 6]]] - - (5+3)*6 - [[[5, '+', 3], '*', 6]] - - -2--11 - [[['-', 2], '-', ['-', 11]]] - """ - # captive version of FollowedBy that does not do parse actions or capture results names - class _FB(FollowedBy): - def parseImpl(self, instring, loc, doActions=True): - self.expr.tryParse(instring, loc) - return loc, [] - - ret = Forward() - lastExpr = baseExpr | (lpar + ret + rpar) - for i, operDef in enumerate(opList): - opExpr, arity, rightLeftAssoc, pa = (operDef + (None, ))[:4] - termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr - if arity == 3: - if opExpr is None or len(opExpr) != 2: - raise ValueError( - "if numterms=3, opExpr must be a tuple or list of two expressions") - opExpr1, opExpr2 = opExpr - thisExpr = Forward().setName(termName) - if rightLeftAssoc == opAssoc.LEFT: - if arity == 1: - matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + OneOrMore(opExpr)) - elif arity == 2: - if opExpr is not None: - matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group(lastExpr + OneOrMore(opExpr + lastExpr)) - else: - matchExpr = _FB(lastExpr + lastExpr) + Group(lastExpr + OneOrMore(lastExpr)) - elif arity == 3: - matchExpr = (_FB(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) - + Group(lastExpr + OneOrMore(opExpr1 + lastExpr + opExpr2 + lastExpr))) - else: - raise ValueError("operator must be unary (1), binary (2), or ternary (3)") - elif rightLeftAssoc == opAssoc.RIGHT: - if arity == 1: - # try to avoid LR with this extra test - if not isinstance(opExpr, Optional): - opExpr = Optional(opExpr) - matchExpr = _FB(opExpr.expr + thisExpr) + Group(opExpr + thisExpr) - elif arity == 2: - if opExpr is not 
None: - matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group(lastExpr + OneOrMore(opExpr + thisExpr)) - else: - matchExpr = _FB(lastExpr + thisExpr) + Group(lastExpr + OneOrMore(thisExpr)) - elif arity == 3: - matchExpr = (_FB(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) - + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr)) - else: - raise ValueError("operator must be unary (1), binary (2), or ternary (3)") - else: - raise ValueError("operator must indicate right or left associativity") - if pa: - if isinstance(pa, (tuple, list)): - matchExpr.setParseAction(*pa) - else: - matchExpr.setParseAction(pa) - thisExpr <<= (matchExpr.setName(termName) | lastExpr) - lastExpr = thisExpr - ret <<= lastExpr - return ret - -operatorPrecedence = infixNotation -"""(Deprecated) Former name of :class:`infixNotation`, will be -dropped in a future release.""" - -dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"').setName("string enclosed in double quotes") -sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'").setName("string enclosed in single quotes") -quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"' - | Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'").setName("quotedString using single or double quotes") -unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal") - -def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()): - """Helper method for defining nested lists enclosed in opening and - closing delimiters ("(" and ")" are the default). - - Parameters: - - opener - opening character for a nested list - (default= ``"("``); can also be a pyparsing expression - - closer - closing character for a nested list - (default= ``")"``); can also be a pyparsing expression - - content - expression for items within the nested lists - (default= ``None``) - - ignoreExpr - expression for ignoring opening and closing - delimiters (default= :class:`quotedString`) - - If an expression is not provided for the content argument, the - nested expression will capture all whitespace-delimited content - between delimiters as a list of separate values. - - Use the ``ignoreExpr`` argument to define expressions that may - contain opening or closing characters that should not be treated as - opening or closing characters for nesting, such as quotedString or - a comment expression. Specify multiple expressions using an - :class:`Or` or :class:`MatchFirst`. The default is - :class:`quotedString`, but if no expressions are to be ignored, then - pass ``None`` for this argument. 
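
A minimal runnable ``infixNotation`` sketch (standalone pyparsing 2.x assumed), restating the arithmetic example above::

    from pyparsing import infixNotation, oneOf, opAssoc, pyparsing_common

    integer = pyparsing_common.signed_integer
    # precedence levels listed from tightest-binding to loosest
    arith_expr = infixNotation(integer, [
        ('-', 1, opAssoc.RIGHT),
        (oneOf('* /'), 2, opAssoc.LEFT),
        (oneOf('+ -'), 2, opAssoc.LEFT),
    ])
    print(arith_expr.parseString("5+3*6").asList())    # [[5, '+', [3, '*', 6]]]
    print(arith_expr.parseString("(5+3)*6").asList())  # [[[5, '+', 3], '*', 6]]
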
- - Example:: - - data_type = oneOf("void int short long char float double") - decl_data_type = Combine(data_type + Optional(Word('*'))) - ident = Word(alphas+'_', alphanums+'_') - number = pyparsing_common.number - arg = Group(decl_data_type + ident) - LPAR, RPAR = map(Suppress, "()") - - code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment)) - - c_function = (decl_data_type("type") - + ident("name") - + LPAR + Optional(delimitedList(arg), [])("args") + RPAR - + code_body("body")) - c_function.ignore(cStyleComment) - - source_code = ''' - int is_odd(int x) { - return (x%2); - } - - int dec_to_hex(char hchar) { - if (hchar >= '0' && hchar <= '9') { - return (ord(hchar)-ord('0')); - } else { - return (10+ord(hchar)-ord('A')); - } - } - ''' - for func in c_function.searchString(source_code): - print("%(name)s (%(type)s) args: %(args)s" % func) - - - prints:: - - is_odd (int) args: [['int', 'x']] - dec_to_hex (int) args: [['char', 'hchar']] - """ - if opener == closer: - raise ValueError("opening and closing strings cannot be the same") - if content is None: - if isinstance(opener, basestring) and isinstance(closer, basestring): - if len(opener) == 1 and len(closer) == 1: - if ignoreExpr is not None: - content = (Combine(OneOrMore(~ignoreExpr - + CharsNotIn(opener - + closer - + ParserElement.DEFAULT_WHITE_CHARS, exact=1) - ) - ).setParseAction(lambda t: t[0].strip())) - else: - content = (empty.copy() + CharsNotIn(opener - + closer - + ParserElement.DEFAULT_WHITE_CHARS - ).setParseAction(lambda t: t[0].strip())) - else: - if ignoreExpr is not None: - content = (Combine(OneOrMore(~ignoreExpr - + ~Literal(opener) - + ~Literal(closer) - + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)) - ).setParseAction(lambda t: t[0].strip())) - else: - content = (Combine(OneOrMore(~Literal(opener) - + ~Literal(closer) - + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)) - ).setParseAction(lambda t: t[0].strip())) - else: - raise ValueError("opening and closing arguments must be strings if no content expression is given") - ret = Forward() - if ignoreExpr is not None: - ret <<= Group(Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer)) - else: - ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer)) - ret.setName('nested %s%s expression' % (opener, closer)) - return ret - -def indentedBlock(blockStatementExpr, indentStack, indent=True): - """Helper method for defining space-delimited indentation blocks, - such as those used to define block statements in Python source code. - - Parameters: - - - blockStatementExpr - expression defining syntax of statement that - is repeated within the indented block - - indentStack - list created by caller to manage indentation stack - (multiple statementWithIndentedBlock expressions within a single - grammar should share a common indentStack) - - indent - boolean indicating whether block must be indented beyond - the current level; set to False for block of left-most - statements (default= ``True``) - - A valid block must contain at least one ``blockStatement``. 
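
A minimal ``nestedExpr`` sketch with the default delimiters (standalone pyparsing 2.x assumed)::

    from pyparsing import nestedExpr

    # whitespace-delimited content between parens becomes a nested list
    parens = nestedExpr('(', ')')
    print(parens.parseString("(a (b c) d)").asList())  # [['a', ['b', 'c'], 'd']]
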
- - Example:: - - data = ''' - def A(z): - A1 - B = 100 - G = A2 - A2 - A3 - B - def BB(a,b,c): - BB1 - def BBA(): - bba1 - bba2 - bba3 - C - D - def spam(x,y): - def eggs(z): - pass - ''' - - - indentStack = [1] - stmt = Forward() - - identifier = Word(alphas, alphanums) - funcDecl = ("def" + identifier + Group("(" + Optional(delimitedList(identifier)) + ")") + ":") - func_body = indentedBlock(stmt, indentStack) - funcDef = Group(funcDecl + func_body) - - rvalue = Forward() - funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")") - rvalue << (funcCall | identifier | Word(nums)) - assignment = Group(identifier + "=" + rvalue) - stmt << (funcDef | assignment | identifier) - - module_body = OneOrMore(stmt) - - parseTree = module_body.parseString(data) - parseTree.pprint() - - prints:: - - [['def', - 'A', - ['(', 'z', ')'], - ':', - [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]], - 'B', - ['def', - 'BB', - ['(', 'a', 'b', 'c', ')'], - ':', - [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]], - 'C', - 'D', - ['def', - 'spam', - ['(', 'x', 'y', ')'], - ':', - [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] - """ - backup_stack = indentStack[:] - - def reset_stack(): - indentStack[:] = backup_stack - - def checkPeerIndent(s, l, t): - if l >= len(s): return - curCol = col(l, s) - if curCol != indentStack[-1]: - if curCol > indentStack[-1]: - raise ParseException(s, l, "illegal nesting") - raise ParseException(s, l, "not a peer entry") - - def checkSubIndent(s, l, t): - curCol = col(l, s) - if curCol > indentStack[-1]: - indentStack.append(curCol) - else: - raise ParseException(s, l, "not a subentry") - - def checkUnindent(s, l, t): - if l >= len(s): return - curCol = col(l, s) - if not(indentStack and curCol in indentStack): - raise ParseException(s, l, "not an unindent") - if curCol < indentStack[-1]: - indentStack.pop() - - NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress(), stopOn=StringEnd()) - INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT') - PEER = Empty().setParseAction(checkPeerIndent).setName('') - UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT') - if indent: - smExpr = Group(Optional(NL) - + INDENT - + OneOrMore(PEER + Group(blockStatementExpr) + Optional(NL), stopOn=StringEnd()) - + UNDENT) - else: - smExpr = Group(Optional(NL) - + OneOrMore(PEER + Group(blockStatementExpr) + Optional(NL), stopOn=StringEnd()) - + UNDENT) - smExpr.setFailAction(lambda a, b, c, d: reset_stack()) - blockStatementExpr.ignore(_bslash + LineEnd()) - return smExpr.setName('indented block') - -alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") -punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") - -anyOpenTag, anyCloseTag = makeHTMLTags(Word(alphas, alphanums + "_:").setName('any tag')) -_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(), '><& "\'')) -commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity") -def replaceHTMLEntity(t): - """Helper parser action to replace common HTML entities with their special characters""" - return _htmlEntityMap.get(t.entity) - -# it's easy to get these comment structures wrong - they're very common, so may as well make them available -cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment") -"Comment of the form ``/* ... */``" - -htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment") -"Comment of the form ``<!-- ... 
-->``" - -restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line") -dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment") -"Comment of the form ``// ... (to end of line)``" - -cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/' | dblSlashComment).setName("C++ style comment") -"Comment of either form :class:`cStyleComment` or :class:`dblSlashComment`" - -javaStyleComment = cppStyleComment -"Same as :class:`cppStyleComment`" - -pythonStyleComment = Regex(r"#.*").setName("Python style comment") -"Comment of the form ``# ... (to end of line)``" - -_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') - + Optional(Word(" \t") - + ~Literal(",") + ~LineEnd()))).streamline().setName("commaItem") -commaSeparatedList = delimitedList(Optional(quotedString.copy() | _commasepitem, default="")).setName("commaSeparatedList") -"""(Deprecated) Predefined expression of 1 or more printable words or -quoted strings, separated by commas. - -This expression is deprecated in favor of :class:`pyparsing_common.comma_separated_list`. -""" - -# some other useful expressions - using lower-case class name since we are really using this as a namespace -class pyparsing_common: - """Here are some common low-level expressions that may be useful in - jump-starting parser development: - - - numeric forms (:class:`integers<integer>`, :class:`reals<real>`, - :class:`scientific notation<sci_real>`) - - common :class:`programming identifiers<identifier>` - - network addresses (:class:`MAC<mac_address>`, - :class:`IPv4<ipv4_address>`, :class:`IPv6<ipv6_address>`) - - ISO8601 :class:`dates<iso8601_date>` and - :class:`datetime<iso8601_datetime>` - - :class:`UUID<uuid>` - - :class:`comma-separated list<comma_separated_list>` - - Parse actions: - - - :class:`convertToInteger` - - :class:`convertToFloat` - - :class:`convertToDate` - - :class:`convertToDatetime` - - :class:`stripHTMLTags` - - :class:`upcaseTokens` - - :class:`downcaseTokens` - - Example:: - - pyparsing_common.number.runTests(''' - # any int or real number, returned as the appropriate type - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - ''') - - pyparsing_common.fnumber.runTests(''' - # any int or real number, returned as float - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - ''') - - pyparsing_common.hex_integer.runTests(''' - # hex numbers - 100 - FF - ''') - - pyparsing_common.fraction.runTests(''' - # fractions - 1/2 - -3/4 - ''') - - pyparsing_common.mixed_integer.runTests(''' - # mixed fractions - 1 - 1/2 - -3/4 - 1-3/4 - ''') - - import uuid - pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) - pyparsing_common.uuid.runTests(''' - # uuid - 12345678-1234-5678-1234-567812345678 - ''') - - prints:: - - # any int or real number, returned as the appropriate type - 100 - [100] - - -100 - [-100] - - +100 - [100] - - 3.14159 - [3.14159] - - 6.02e23 - [6.02e+23] - - 1e-12 - [1e-12] - - # any int or real number, returned as float - 100 - [100.0] - - -100 - [-100.0] - - +100 - [100.0] - - 3.14159 - [3.14159] - - 6.02e23 - [6.02e+23] - - 1e-12 - [1e-12] - - # hex numbers - 100 - [256] - - FF - [255] - - # fractions - 1/2 - [0.5] - - -3/4 - [-0.75] - - # mixed fractions - 1 - [1] - - 1/2 - [0.5] - - -3/4 - [-0.75] - - 1-3/4 - [1.75] - - # uuid - 12345678-1234-5678-1234-567812345678 - [UUID('12345678-1234-5678-1234-567812345678')] - """ - - convertToInteger = tokenMap(int) - """ - Parse action for converting parsed integers to Python int - """ - - convertToFloat = tokenMap(float) - """ - Parse action for 
converting parsed numbers to Python float - """ - - integer = Word(nums).setName("integer").setParseAction(convertToInteger) - """expression that parses an unsigned integer, returns an int""" - - hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int, 16)) - """expression that parses a hexadecimal integer, returns an int""" - - signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger) - """expression that parses an integer with optional leading sign, returns an int""" - - fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction") - """fractional expression of an integer divided by an integer, returns a float""" - fraction.addParseAction(lambda t: t[0]/t[-1]) - - mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction") - """mixed integer of the form 'integer - fraction', with optional leading integer, returns float""" - mixed_integer.addParseAction(sum) - - real = Regex(r'[+-]?(?:\d+\.\d*|\.\d+)').setName("real number").setParseAction(convertToFloat) - """expression that parses a floating point number and returns a float""" - - sci_real = Regex(r'[+-]?(?:\d+(?:[eE][+-]?\d+)|(?:\d+\.\d*|\.\d+)(?:[eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat) - """expression that parses a floating point number with optional - scientific notation and returns a float""" - - # streamlining this expression makes the docs nicer-looking - number = (sci_real | real | signed_integer).streamline() - """any numeric expression, returns the corresponding Python type""" - - fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat) - """any int or real number, returned as float""" - - identifier = Word(alphas + '_', alphanums + '_').setName("identifier") - """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" - - ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address") - "IPv4 address (``0.0.0.0 - 255.255.255.255``)" - - _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer") - _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part) * 7).setName("full IPv6 address") - _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part) * (0, 6)) - + "::" - + Optional(_ipv6_part + (':' + _ipv6_part) * (0, 6)) - ).setName("short IPv6 address") - _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8) - _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address") - ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address") - "IPv6 address (long, short, or mixed form)" - - mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address") - "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' 
delimiters)" - - @staticmethod - def convertToDate(fmt="%Y-%m-%d"): - """ - Helper to create a parse action for converting parsed date string to Python datetime.date - - Params - - - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``) - - Example:: - - date_expr = pyparsing_common.iso8601_date.copy() - date_expr.setParseAction(pyparsing_common.convertToDate()) - print(date_expr.parseString("1999-12-31")) - - prints:: - - [datetime.date(1999, 12, 31)] - """ - def cvt_fn(s, l, t): - try: - return datetime.strptime(t[0], fmt).date() - except ValueError as ve: - raise ParseException(s, l, str(ve)) - return cvt_fn - - @staticmethod - def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"): - """Helper to create a parse action for converting parsed - datetime string to Python datetime.datetime - - Params - - - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``) - - Example:: - - dt_expr = pyparsing_common.iso8601_datetime.copy() - dt_expr.setParseAction(pyparsing_common.convertToDatetime()) - print(dt_expr.parseString("1999-12-31T23:59:59.999")) - - prints:: - - [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] - """ - def cvt_fn(s, l, t): - try: - return datetime.strptime(t[0], fmt) - except ValueError as ve: - raise ParseException(s, l, str(ve)) - return cvt_fn - - iso8601_date = Regex(r'(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?').setName("ISO8601 date") - "ISO8601 date (``yyyy-mm-dd``)" - - iso8601_datetime = Regex(r'(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime") - "ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``" - - uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID") - "UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)" - - _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress() - @staticmethod - def stripHTMLTags(s, l, tokens): - """Parse action to remove HTML tags from web page HTML source - - Example:: - - # strip HTML links from normal text - text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>' - td, td_end = makeHTMLTags("TD") - table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end - print(table_text.parseString(text).body) - - Prints:: - - More info at the pyparsing wiki page - """ - return pyparsing_common._html_stripper.transformString(tokens[0]) - - _commasepitem = Combine(OneOrMore(~Literal(",") - + ~LineEnd() - + Word(printables, excludeChars=',') - + Optional(White(" \t")))).streamline().setName("commaItem") - comma_separated_list = delimitedList(Optional(quotedString.copy() - | _commasepitem, default='') - ).setName("comma separated list") - """Predefined expression of 1 or more printable words or quoted strings, separated by commas.""" - - upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper())) - """Parse action to convert tokens to upper case.""" - - downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower())) - """Parse action to convert tokens to lower case.""" - - -class _lazyclassproperty(object): - def __init__(self, fn): - self.fn = fn - self.__doc__ = fn.__doc__ - self.__name__ = fn.__name__ - - def __get__(self, obj, cls): - if cls is None: - cls = type(obj) - if not hasattr(cls, '_intern') or any(cls._intern is getattr(superclass, 
'_intern', []) - for superclass in cls.__mro__[1:]): - cls._intern = {} - attrname = self.fn.__name__ - if attrname not in cls._intern: - cls._intern[attrname] = self.fn(cls) - return cls._intern[attrname] - - -class unicode_set(object): - """ - A set of Unicode characters, for language-specific strings for - ``alphas``, ``nums``, ``alphanums``, and ``printables``. - A unicode_set is defined by a list of ranges in the Unicode character - set, in a class attribute ``_ranges``, such as:: - - _ranges = [(0x0020, 0x007e), (0x00a0, 0x00ff),] - - A unicode set can also be defined using multiple inheritance of other unicode sets:: - - class CJK(Chinese, Japanese, Korean): - pass - """ - _ranges = [] - - @classmethod - def _get_chars_for_ranges(cls): - ret = [] - for cc in cls.__mro__: - if cc is unicode_set: - break - for rr in cc._ranges: - ret.extend(range(rr[0], rr[-1] + 1)) - return [unichr(c) for c in sorted(set(ret))] - - @_lazyclassproperty - def printables(cls): - "all non-whitespace characters in this range" - return u''.join(filterfalse(unicode.isspace, cls._get_chars_for_ranges())) - - @_lazyclassproperty - def alphas(cls): - "all alphabetic characters in this range" - return u''.join(filter(unicode.isalpha, cls._get_chars_for_ranges())) - - @_lazyclassproperty - def nums(cls): - "all numeric digit characters in this range" - return u''.join(filter(unicode.isdigit, cls._get_chars_for_ranges())) - - @_lazyclassproperty - def alphanums(cls): - "all alphanumeric characters in this range" - return cls.alphas + cls.nums - - -class pyparsing_unicode(unicode_set): - """ - A namespace class for defining common language unicode_sets. - """ - _ranges = [(32, sys.maxunicode)] - - class Latin1(unicode_set): - "Unicode set for Latin-1 Unicode Character Range" - _ranges = [(0x0020, 0x007e), (0x00a0, 0x00ff),] - - class LatinA(unicode_set): - "Unicode set for Latin-A Unicode Character Range" - _ranges = [(0x0100, 0x017f),] - - class LatinB(unicode_set): - "Unicode set for Latin-B Unicode Character Range" - _ranges = [(0x0180, 0x024f),] - - class Greek(unicode_set): - "Unicode set for Greek Unicode Character Ranges" - _ranges = [ - (0x0370, 0x03ff), (0x1f00, 0x1f15), (0x1f18, 0x1f1d), (0x1f20, 0x1f45), (0x1f48, 0x1f4d), - (0x1f50, 0x1f57), (0x1f59,), (0x1f5b,), (0x1f5d,), (0x1f5f, 0x1f7d), (0x1f80, 0x1fb4), (0x1fb6, 0x1fc4), - (0x1fc6, 0x1fd3), (0x1fd6, 0x1fdb), (0x1fdd, 0x1fef), (0x1ff2, 0x1ff4), (0x1ff6, 0x1ffe), - ] - - class Cyrillic(unicode_set): - "Unicode set for Cyrillic Unicode Character Range" - _ranges = [(0x0400, 0x04ff)] - - class Chinese(unicode_set): - "Unicode set for Chinese Unicode Character Range" - _ranges = [(0x4e00, 0x9fff), (0x3000, 0x303f),] - - class Japanese(unicode_set): - "Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges" - _ranges = [] - - class Kanji(unicode_set): - "Unicode set for Kanji Unicode Character Range" - _ranges = [(0x4E00, 0x9Fbf), (0x3000, 0x303f),] - - class Hiragana(unicode_set): - "Unicode set for Hiragana Unicode Character Range" - _ranges = [(0x3040, 0x309f),] - - class Katakana(unicode_set): - "Unicode set for Katakana Unicode Character Range" - _ranges = [(0x30a0, 0x30ff),] - - class Korean(unicode_set): - "Unicode set for Korean Unicode Character Range" - _ranges = [(0xac00, 0xd7af), (0x1100, 0x11ff), (0x3130, 0x318f), (0xa960, 0xa97f), (0xd7b0, 0xd7ff), (0x3000, 0x303f),] - - class CJK(Chinese, Japanese, Korean): - "Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range" - 
pass - - class Thai(unicode_set): - "Unicode set for Thai Unicode Character Range" - _ranges = [(0x0e01, 0x0e3a), (0x0e3f, 0x0e5b),] - - class Arabic(unicode_set): - "Unicode set for Arabic Unicode Character Range" - _ranges = [(0x0600, 0x061b), (0x061e, 0x06ff), (0x0700, 0x077f),] - - class Hebrew(unicode_set): - "Unicode set for Hebrew Unicode Character Range" - _ranges = [(0x0590, 0x05ff),] - - class Devanagari(unicode_set): - "Unicode set for Devanagari Unicode Character Range" - _ranges = [(0x0900, 0x097f), (0xa8e0, 0xa8ff)] - -pyparsing_unicode.Japanese._ranges = (pyparsing_unicode.Japanese.Kanji._ranges - + pyparsing_unicode.Japanese.Hiragana._ranges - + pyparsing_unicode.Japanese.Katakana._ranges) - -# define ranges in language character sets -if PY_3: - setattr(pyparsing_unicode, u"العربية", pyparsing_unicode.Arabic) - setattr(pyparsing_unicode, u"中文", pyparsing_unicode.Chinese) - setattr(pyparsing_unicode, u"кириллица", pyparsing_unicode.Cyrillic) - setattr(pyparsing_unicode, u"Ελληνικά", pyparsing_unicode.Greek) - setattr(pyparsing_unicode, u"עִברִית", pyparsing_unicode.Hebrew) - setattr(pyparsing_unicode, u"日本語", pyparsing_unicode.Japanese) - setattr(pyparsing_unicode.Japanese, u"漢字", pyparsing_unicode.Japanese.Kanji) - setattr(pyparsing_unicode.Japanese, u"カタカナ", pyparsing_unicode.Japanese.Katakana) - setattr(pyparsing_unicode.Japanese, u"ひらがな", pyparsing_unicode.Japanese.Hiragana) - setattr(pyparsing_unicode, u"한국어", pyparsing_unicode.Korean) - setattr(pyparsing_unicode, u"ไทย", pyparsing_unicode.Thai) - setattr(pyparsing_unicode, u"देवनागरी", pyparsing_unicode.Devanagari) - - -class pyparsing_test: - """ - namespace class for classes useful in writing unit tests - """ - - class reset_pyparsing_context: - """ - Context manager to be used when writing unit tests that modify pyparsing config values: - - packrat parsing - - default whitespace characters. - - default keyword characters - - literal string auto-conversion class - - __diag__ settings - - Example: - with reset_pyparsing_context(): - # test that literals used to construct a grammar are automatically suppressed - ParserElement.inlineLiteralsUsing(Suppress) - - term = Word(alphas) | Word(nums) - group = Group('(' + term[...] 
+ ')') - - # assert that the '()' characters are not included in the parsed tokens - self.assertParseAndCheckList(group, "(abc 123 def)", ['abc', '123', 'def']) - - # after exiting context manager, literals are converted to Literal expressions again - """ - - def __init__(self): - self._save_context = {} - - def save(self): - self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS - self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS - self._save_context[ - "literal_string_class" - ] = ParserElement._literalStringClass - self._save_context["packrat_enabled"] = ParserElement._packratEnabled - self._save_context["packrat_parse"] = ParserElement._parse - self._save_context["__diag__"] = { - name: getattr(__diag__, name) for name in __diag__._all_names - } - self._save_context["__compat__"] = { - "collect_all_And_tokens": __compat__.collect_all_And_tokens - } - return self - - def restore(self): - # reset pyparsing global state - if ( - ParserElement.DEFAULT_WHITE_CHARS - != self._save_context["default_whitespace"] - ): - ParserElement.setDefaultWhitespaceChars( - self._save_context["default_whitespace"] - ) - Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"] - ParserElement.inlineLiteralsUsing( - self._save_context["literal_string_class"] - ) - for name, value in self._save_context["__diag__"].items(): - setattr(__diag__, name, value) - ParserElement._packratEnabled = self._save_context["packrat_enabled"] - ParserElement._parse = self._save_context["packrat_parse"] - __compat__.collect_all_And_tokens = self._save_context["__compat__"]["collect_all_And_tokens"] - - def __enter__(self): - return self.save() - - def __exit__(self, *args): - return self.restore() - - class TestParseResultsAsserts: - """ - A mixin class to add parse results assertion methods to normal unittest.TestCase classes. - """ - def assertParseResultsEquals( - self, result, expected_list=None, expected_dict=None, msg=None - ): - """ - Unit test assertion to compare a ParseResults object with an optional expected_list, - and compare any defined results names with an optional expected_dict. - """ - if expected_list is not None: - self.assertEqual(expected_list, result.asList(), msg=msg) - if expected_dict is not None: - self.assertEqual(expected_dict, result.asDict(), msg=msg) - - def assertParseAndCheckList( - self, expr, test_string, expected_list, msg=None, verbose=True - ): - """ - Convenience wrapper assert to test a parser element and input string, and assert that - the resulting ParseResults.asList() is equal to the expected_list. - """ - result = expr.parseString(test_string, parseAll=True) - if verbose: - print(result.dump()) - self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg) - - def assertParseAndCheckDict( - self, expr, test_string, expected_dict, msg=None, verbose=True - ): - """ - Convenience wrapper assert to test a parser element and input string, and assert that - the resulting ParseResults.asDict() is equal to the expected_dict. - """ - result = expr.parseString(test_string, parseAll=True) - if verbose: - print(result.dump()) - self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg) - - def assertRunTestResults( - self, run_tests_report, expected_parse_results=None, msg=None - ): - """ - Unit test assertion to evaluate output of ParserElement.runTests().
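
A sketch of wiring the ``TestParseResultsAsserts`` mixin into a plain unittest case (assumes pyparsing 2.4.7+, where ``pyparsing_test`` lives in the pyparsing module as above; ``MyGrammarTests`` is a made-up name)::

    import unittest

    from pyparsing import OneOrMore, Word, alphas, pyparsing_test

    class MyGrammarTests(pyparsing_test.TestParseResultsAsserts, unittest.TestCase):
        def test_words(self):
            expr = OneOrMore(Word(alphas))
            # parses the string, then compares ParseResults.asList() to the expected list
            self.assertParseAndCheckList(expr, "abc def", ["abc", "def"], verbose=False)

    if __name__ == "__main__":
        unittest.main()
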
If a list of - list-dict tuples is given as the expected_parse_results argument, then these are zipped - with the report tuples returned by runTests and evaluated using assertParseResultsEquals. - Finally, asserts that the overall runTests() success value is True. - - :param run_tests_report: tuple(bool, [tuple(str, ParseResults or Exception)]) returned from runTests - :param expected_parse_results (optional): [tuple(str, list, dict, Exception)] - """ - run_test_success, run_test_results = run_tests_report - - if expected_parse_results is not None: - merged = [ - (rpt[0], rpt[1], expected) - for rpt, expected in zip(run_test_results, expected_parse_results) - ] - for test_string, result, expected in merged: - # expected should be a tuple containing a list and/or a dict or an exception, - # and optional failure message string - # an empty tuple will skip any result validation - fail_msg = next( - (exp for exp in expected if isinstance(exp, str)), None - ) - expected_exception = next( - ( - exp - for exp in expected - if isinstance(exp, type) and issubclass(exp, Exception) - ), - None, - ) - if expected_exception is not None: - with self.assertRaises( - expected_exception=expected_exception, msg=fail_msg or msg - ): - if isinstance(result, Exception): - raise result - else: - expected_list = next( - (exp for exp in expected if isinstance(exp, list)), None - ) - expected_dict = next( - (exp for exp in expected if isinstance(exp, dict)), None - ) - if (expected_list, expected_dict) != (None, None): - self.assertParseResultsEquals( - result, - expected_list=expected_list, - expected_dict=expected_dict, - msg=fail_msg or msg, - ) - else: - # warning here maybe? - print("no validation for {!r}".format(test_string)) - - # do this last, in case some specific test results can be reported instead - self.assertTrue( - run_test_success, msg=msg if msg is not None else "failed runTests" - ) - - @contextmanager - def assertRaisesParseException(self, exc_type=ParseException, msg=None): - with self.assertRaises(exc_type, msg=msg): - yield - - -if __name__ == "__main__": - - selectToken = CaselessLiteral("select") - fromToken = CaselessLiteral("from") - - ident = Word(alphas, alphanums + "_$") - - columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) - columnNameList = Group(delimitedList(columnName)).setName("columns") - columnSpec = ('*' | columnNameList) - - tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) - tableNameList = Group(delimitedList(tableName)).setName("tables") - - simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables") - - # demo runTests method, including embedded comments in test string - simpleSQL.runTests(""" - # '*' as column list and dotted table name - select * from SYS.XYZZY - - # caseless match on "SELECT", and casts back to "select" - SELECT * from XYZZY, ABC - - # list of column names, and mixed case SELECT keyword - Select AA,BB,CC from Sys.dual - - # multiple tables - Select A, B, C from Sys.dual, Table2 - - # invalid SELECT keyword - should fail - Xelect A, B, C from Sys.dual - - # incomplete command - should fail - Select - - # invalid column name - should fail - Select ^^^ frox Sys.dual - - """) - - pyparsing_common.number.runTests(""" - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - """) - - # any int or real number, returned as float - pyparsing_common.fnumber.runTests(""" - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - """) - - pyparsing_common.hex_integer.runTests(""" - 
100 - FF - """) - - import uuid - pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) - pyparsing_common.uuid.runTests(""" - 12345678-1234-5678-1234-567812345678 - """) diff --git a/env/Lib/site-packages/pip/_vendor/requests/__init__.py b/env/Lib/site-packages/pip/_vendor/requests/__init__.py index 18046c45138f97fe8865d5bbcbe03c60244600b8..10ff67ff4d2bca253a91e4e6461ad096b41da03a 100644 --- a/env/Lib/site-packages/pip/_vendor/requests/__init__.py +++ b/env/Lib/site-packages/pip/_vendor/requests/__init__.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - # __ # /__) _ _ _ _ _/ _ # / ( (- (/ (/ (- _) / _) @@ -40,53 +38,78 @@ is at <https://requests.readthedocs.io>. :license: Apache 2.0, see LICENSE for more details. """ -from pip._vendor import urllib3 -from pip._vendor import chardet import warnings + +from pip._vendor import urllib3 + from .exceptions import RequestsDependencyWarning +charset_normalizer_version = None + +try: + from pip._vendor.chardet import __version__ as chardet_version +except ImportError: + chardet_version = None + -def check_compatibility(urllib3_version, chardet_version): - urllib3_version = urllib3_version.split('.') - assert urllib3_version != ['dev'] # Verify urllib3 isn't installed from git. +def check_compatibility(urllib3_version, chardet_version, charset_normalizer_version): + urllib3_version = urllib3_version.split(".") + assert urllib3_version != ["dev"] # Verify urllib3 isn't installed from git. # Sometimes, urllib3 only reports its version as 16.1. if len(urllib3_version) == 2: - urllib3_version.append('0') + urllib3_version.append("0") # Check urllib3 for compatibility. major, minor, patch = urllib3_version # noqa: F811 major, minor, patch = int(major), int(minor), int(patch) - # urllib3 >= 1.21.1, <= 1.26 - assert major == 1 - assert minor >= 21 - assert minor <= 26 - - # Check chardet for compatibility. - major, minor, patch = chardet_version.split('.')[:3] - major, minor, patch = int(major), int(minor), int(patch) - # chardet >= 3.0.2, < 5.0.0 - assert (3, 0, 2) <= (major, minor, patch) < (5, 0, 0) + # urllib3 >= 1.21.1 + assert major >= 1 + if major == 1: + assert minor >= 21 + + # Check charset_normalizer for compatibility. + if chardet_version: + major, minor, patch = chardet_version.split(".")[:3] + major, minor, patch = int(major), int(minor), int(patch) + # chardet_version >= 3.0.2, < 6.0.0 + assert (3, 0, 2) <= (major, minor, patch) < (6, 0, 0) + elif charset_normalizer_version: + major, minor, patch = charset_normalizer_version.split(".")[:3] + major, minor, patch = int(major), int(minor), int(patch) + # charset_normalizer >= 2.0.0 < 4.0.0 + assert (2, 0, 0) <= (major, minor, patch) < (4, 0, 0) + else: + raise Exception("You need either charset_normalizer or chardet installed") def _check_cryptography(cryptography_version): # cryptography < 1.3.4 try: - cryptography_version = list(map(int, cryptography_version.split('.'))) + cryptography_version = list(map(int, cryptography_version.split("."))) except ValueError: return if cryptography_version < [1, 3, 4]: - warning = 'Old version of cryptography ({}) may cause slowdown.'.format(cryptography_version) + warning = "Old version of cryptography ({}) may cause slowdown.".format( + cryptography_version + ) warnings.warn(warning, RequestsDependencyWarning) + # Check imported dependencies for compatibility. 
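
The version gates above reduce to tuple comparisons on parsed version strings; a standalone sketch of that pattern (``_ver`` is a hypothetical helper, not part of requests)::

    def _ver(version_string):
        """'X.Y' or 'X.Y.Z' -> int tuple, padding a missing patch with 0."""
        parts = version_string.split(".")[:3]
        parts += ["0"] * (3 - len(parts))
        return tuple(int(p) for p in parts)

    # the same inclusive/exclusive bounds as the asserts above
    assert (3, 0, 2) <= _ver("5.2.0") < (6, 0, 0)  # chardet range
    assert (2, 0, 0) <= _ver("3.1") < (4, 0, 0)    # charset_normalizer range
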
try: - check_compatibility(urllib3.__version__, chardet.__version__) + check_compatibility( + urllib3.__version__, chardet_version, charset_normalizer_version + ) except (AssertionError, ValueError): - warnings.warn("urllib3 ({}) or chardet ({}) doesn't match a supported " - "version!".format(urllib3.__version__, chardet.__version__), - RequestsDependencyWarning) + warnings.warn( + "urllib3 ({}) or chardet ({})/charset_normalizer ({}) doesn't match a supported " + "version!".format( + urllib3.__version__, chardet_version, charset_normalizer_version + ), + RequestsDependencyWarning, + ) # Attempt to enable urllib3's fallback for SNI support # if the standard library doesn't support SNI or the @@ -104,39 +127,56 @@ try: if not getattr(ssl, "HAS_SNI", False): from pip._vendor.urllib3.contrib import pyopenssl + pyopenssl.inject_into_urllib3() # Check cryptography version from cryptography import __version__ as cryptography_version + _check_cryptography(cryptography_version) except ImportError: pass # urllib3's DependencyWarnings should be silenced. from pip._vendor.urllib3.exceptions import DependencyWarning -warnings.simplefilter('ignore', DependencyWarning) - -from .__version__ import __title__, __description__, __url__, __version__ -from .__version__ import __build__, __author__, __author_email__, __license__ -from .__version__ import __copyright__, __cake__ -from . import utils -from . import packages -from .models import Request, Response, PreparedRequest -from .api import request, get, head, post, patch, put, delete, options -from .sessions import session, Session -from .status_codes import codes -from .exceptions import ( - RequestException, Timeout, URLRequired, - TooManyRedirects, HTTPError, ConnectionError, - FileModeWarning, ConnectTimeout, ReadTimeout -) +warnings.simplefilter("ignore", DependencyWarning) # Set default logging handler to avoid "No handler found" warnings. import logging from logging import NullHandler +from . import packages, utils +from .__version__ import ( + __author__, + __author_email__, + __build__, + __cake__, + __copyright__, + __description__, + __license__, + __title__, + __url__, + __version__, +) +from .api import delete, get, head, options, patch, post, put, request +from .exceptions import ( + ConnectionError, + ConnectTimeout, + FileModeWarning, + HTTPError, + JSONDecodeError, + ReadTimeout, + RequestException, + Timeout, + TooManyRedirects, + URLRequired, +) +from .models import PreparedRequest, Request, Response +from .sessions import Session, session +from .status_codes import codes + logging.getLogger(__name__).addHandler(NullHandler()) # FileModeWarnings go off per the default. 
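
The ``NullHandler`` registration that follows is the standard library-logging convention; a minimal standalone illustration::

    import logging
    from logging import NullHandler

    # attach a do-nothing handler so importing the library never emits
    # "No handler found" warnings; applications opt in to real logging
    logging.getLogger("requests").addHandler(NullHandler())
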
-warnings.simplefilter('default', FileModeWarning, append=True) +warnings.simplefilter("default", FileModeWarning, append=True) diff --git a/env/Lib/site-packages/pip/_vendor/requests/__version__.py b/env/Lib/site-packages/pip/_vendor/requests/__version__.py index 1267488d28c6b6c03063c80c1ef8192ae20fc7e8..5063c3f8ee7980493efcc30c24f7e7582714aa81 100644 --- a/env/Lib/site-packages/pip/_vendor/requests/__version__.py +++ b/env/Lib/site-packages/pip/_vendor/requests/__version__.py @@ -2,13 +2,13 @@ # |( |- |.| | | |- `-. | `-. # ' ' `-' `-`.`-' `-' `-' ' `-' -__title__ = 'requests' -__description__ = 'Python HTTP for Humans.' -__url__ = 'https://requests.readthedocs.io' -__version__ = '2.25.1' -__build__ = 0x022501 -__author__ = 'Kenneth Reitz' -__author_email__ = 'me@kennethreitz.org' -__license__ = 'Apache 2.0' -__copyright__ = 'Copyright 2020 Kenneth Reitz' -__cake__ = u'\u2728 \U0001f370 \u2728' +__title__ = "requests" +__description__ = "Python HTTP for Humans." +__url__ = "https://requests.readthedocs.io" +__version__ = "2.31.0" +__build__ = 0x023100 +__author__ = "Kenneth Reitz" +__author_email__ = "me@kennethreitz.org" +__license__ = "Apache 2.0" +__copyright__ = "Copyright Kenneth Reitz" +__cake__ = "\u2728 \U0001f370 \u2728" diff --git a/env/Lib/site-packages/pip/_vendor/requests/_internal_utils.py b/env/Lib/site-packages/pip/_vendor/requests/_internal_utils.py index 759d9a56ba0102bb3b7b3abfcfae6731a2ecc243..f2cf635e2937ee9b123a1498c5c5f723a6e20084 100644 --- a/env/Lib/site-packages/pip/_vendor/requests/_internal_utils.py +++ b/env/Lib/site-packages/pip/_vendor/requests/_internal_utils.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - """ requests._internal_utils ~~~~~~~~~~~~~~ @@ -7,11 +5,24 @@ requests._internal_utils Provides utility functions that are consumed internally by Requests which depend on extremely few external helpers (such as compat) """ +import re + +from .compat import builtin_str + +_VALID_HEADER_NAME_RE_BYTE = re.compile(rb"^[^:\s][^:\r\n]*$") +_VALID_HEADER_NAME_RE_STR = re.compile(r"^[^:\s][^:\r\n]*$") +_VALID_HEADER_VALUE_RE_BYTE = re.compile(rb"^\S[^\r\n]*$|^$") +_VALID_HEADER_VALUE_RE_STR = re.compile(r"^\S[^\r\n]*$|^$") -from .compat import is_py2, builtin_str, str +_HEADER_VALIDATORS_STR = (_VALID_HEADER_NAME_RE_STR, _VALID_HEADER_VALUE_RE_STR) +_HEADER_VALIDATORS_BYTE = (_VALID_HEADER_NAME_RE_BYTE, _VALID_HEADER_VALUE_RE_BYTE) +HEADER_VALIDATORS = { + bytes: _HEADER_VALIDATORS_BYTE, + str: _HEADER_VALIDATORS_STR, +} -def to_native_string(string, encoding='ascii'): +def to_native_string(string, encoding="ascii"): """Given a string object, regardless of type, returns a representation of that string in the native string type, encoding and decoding where necessary. This assumes ASCII unless told otherwise.
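
A standalone restatement of the simplified, Python-3-only helper above (the assertion is illustrative)::

    def to_native_string(string, encoding="ascii"):
        # on Python 3 the native string type is str, so bytes are
        # decoded and str instances pass through unchanged
        if isinstance(string, str):
            return string
        return string.decode(encoding)

    assert to_native_string(b"Content-Type") == "Content-Type"
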
@@ -19,10 +30,7 @@ def to_native_string(string, encoding='ascii'): if isinstance(string, builtin_str): out = string else: - if is_py2: - out = string.encode(encoding) - else: - out = string.decode(encoding) + out = string.decode(encoding) return out @@ -36,7 +44,7 @@ def unicode_is_ascii(u_string): """ assert isinstance(u_string, str) try: - u_string.encode('ascii') + u_string.encode("ascii") return True except UnicodeEncodeError: return False diff --git a/env/Lib/site-packages/pip/_vendor/requests/adapters.py b/env/Lib/site-packages/pip/_vendor/requests/adapters.py index c30e7c92dc7f1688c86c345b47656b3561d71373..10c176790b622465538788d73a9e3afee99b3875 100644 --- a/env/Lib/site-packages/pip/_vendor/requests/adapters.py +++ b/env/Lib/site-packages/pip/_vendor/requests/adapters.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - """ requests.adapters ~~~~~~~~~~~~~~~~~ @@ -9,57 +7,75 @@ and maintain connections. """ import os.path -import socket +import socket # noqa: F401 -from pip._vendor.urllib3.poolmanager import PoolManager, proxy_from_url -from pip._vendor.urllib3.response import HTTPResponse -from pip._vendor.urllib3.util import parse_url -from pip._vendor.urllib3.util import Timeout as TimeoutSauce -from pip._vendor.urllib3.util.retry import Retry -from pip._vendor.urllib3.exceptions import ClosedPoolError -from pip._vendor.urllib3.exceptions import ConnectTimeoutError +from pip._vendor.urllib3.exceptions import ClosedPoolError, ConnectTimeoutError from pip._vendor.urllib3.exceptions import HTTPError as _HTTPError -from pip._vendor.urllib3.exceptions import MaxRetryError -from pip._vendor.urllib3.exceptions import NewConnectionError +from pip._vendor.urllib3.exceptions import InvalidHeader as _InvalidHeader +from pip._vendor.urllib3.exceptions import ( + LocationValueError, + MaxRetryError, + NewConnectionError, + ProtocolError, +) from pip._vendor.urllib3.exceptions import ProxyError as _ProxyError -from pip._vendor.urllib3.exceptions import ProtocolError -from pip._vendor.urllib3.exceptions import ReadTimeoutError +from pip._vendor.urllib3.exceptions import ReadTimeoutError, ResponseError from pip._vendor.urllib3.exceptions import SSLError as _SSLError -from pip._vendor.urllib3.exceptions import ResponseError -from pip._vendor.urllib3.exceptions import LocationValueError +from pip._vendor.urllib3.poolmanager import PoolManager, proxy_from_url +from pip._vendor.urllib3.util import Timeout as TimeoutSauce +from pip._vendor.urllib3.util import parse_url +from pip._vendor.urllib3.util.retry import Retry +from .auth import _basic_auth_str +from .compat import basestring, urlparse +from .cookies import extract_cookies_to_jar +from .exceptions import ( + ConnectionError, + ConnectTimeout, + InvalidHeader, + InvalidProxyURL, + InvalidSchema, + InvalidURL, + ProxyError, + ReadTimeout, + RetryError, + SSLError, +) from .models import Response -from .compat import urlparse, basestring -from .utils import (DEFAULT_CA_BUNDLE_PATH, extract_zipped_paths, - get_encoding_from_headers, prepend_scheme_if_needed, - get_auth_from_url, urldefragauth, select_proxy) from .structures import CaseInsensitiveDict -from .cookies import extract_cookies_to_jar -from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError, - ProxyError, RetryError, InvalidSchema, InvalidProxyURL, - InvalidURL) -from .auth import _basic_auth_str +from .utils import ( + DEFAULT_CA_BUNDLE_PATH, + extract_zipped_paths, + get_auth_from_url, + get_encoding_from_headers, + prepend_scheme_if_needed, + select_proxy, + 
urldefragauth, +) try: from pip._vendor.urllib3.contrib.socks import SOCKSProxyManager except ImportError: + def SOCKSProxyManager(*args, **kwargs): raise InvalidSchema("Missing dependencies for SOCKS support.") + DEFAULT_POOLBLOCK = False DEFAULT_POOLSIZE = 10 DEFAULT_RETRIES = 0 DEFAULT_POOL_TIMEOUT = None -class BaseAdapter(object): +class BaseAdapter: """The Base Transport Adapter""" def __init__(self): - super(BaseAdapter, self).__init__() + super().__init__() - def send(self, request, stream=False, timeout=None, verify=True, - cert=None, proxies=None): + def send( + self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None + ): """Sends PreparedRequest object. Returns Response object. :param request: The :class:`PreparedRequest <PreparedRequest>` being sent. @@ -107,12 +123,22 @@ class HTTPAdapter(BaseAdapter): >>> a = requests.adapters.HTTPAdapter(max_retries=3) >>> s.mount('http://', a) """ - __attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize', - '_pool_block'] - def __init__(self, pool_connections=DEFAULT_POOLSIZE, - pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES, - pool_block=DEFAULT_POOLBLOCK): + __attrs__ = [ + "max_retries", + "config", + "_pool_connections", + "_pool_maxsize", + "_pool_block", + ] + + def __init__( + self, + pool_connections=DEFAULT_POOLSIZE, + pool_maxsize=DEFAULT_POOLSIZE, + max_retries=DEFAULT_RETRIES, + pool_block=DEFAULT_POOLBLOCK, + ): if max_retries == DEFAULT_RETRIES: self.max_retries = Retry(0, read=False) else: @@ -120,7 +146,7 @@ class HTTPAdapter(BaseAdapter): self.config = {} self.proxy_manager = {} - super(HTTPAdapter, self).__init__() + super().__init__() self._pool_connections = pool_connections self._pool_maxsize = pool_maxsize @@ -140,10 +166,13 @@ class HTTPAdapter(BaseAdapter): for attr, value in state.items(): setattr(self, attr, value) - self.init_poolmanager(self._pool_connections, self._pool_maxsize, - block=self._pool_block) + self.init_poolmanager( + self._pool_connections, self._pool_maxsize, block=self._pool_block + ) - def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs): + def init_poolmanager( + self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs + ): """Initializes a urllib3 PoolManager. This method should not be called from user code, and is only @@ -160,8 +189,12 @@ class HTTPAdapter(BaseAdapter): self._pool_maxsize = maxsize self._pool_block = block - self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize, - block=block, strict=True, **pool_kwargs) + self.poolmanager = PoolManager( + num_pools=connections, + maxsize=maxsize, + block=block, + **pool_kwargs, + ) def proxy_manager_for(self, proxy, **proxy_kwargs): """Return urllib3 ProxyManager for the given proxy. 
@@ -177,7 +210,7 @@ class HTTPAdapter(BaseAdapter): """ if proxy in self.proxy_manager: manager = self.proxy_manager[proxy] - elif proxy.lower().startswith('socks'): + elif proxy.lower().startswith("socks"): username, password = get_auth_from_url(proxy) manager = self.proxy_manager[proxy] = SOCKSProxyManager( proxy, @@ -186,7 +219,7 @@ class HTTPAdapter(BaseAdapter): num_pools=self._pool_connections, maxsize=self._pool_maxsize, block=self._pool_block, - **proxy_kwargs + **proxy_kwargs, ) else: proxy_headers = self.proxy_headers(proxy) @@ -196,7 +229,8 @@ class HTTPAdapter(BaseAdapter): num_pools=self._pool_connections, maxsize=self._pool_maxsize, block=self._pool_block, - **proxy_kwargs) + **proxy_kwargs, + ) return manager @@ -212,7 +246,7 @@ class HTTPAdapter(BaseAdapter): to a CA bundle to use :param cert: The SSL certificate to verify. """ - if url.lower().startswith('https') and verify: + if url.lower().startswith("https") and verify: cert_loc = None @@ -224,17 +258,19 @@ class HTTPAdapter(BaseAdapter): cert_loc = extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH) if not cert_loc or not os.path.exists(cert_loc): - raise IOError("Could not find a suitable TLS CA certificate bundle, " - "invalid path: {}".format(cert_loc)) + raise OSError( + f"Could not find a suitable TLS CA certificate bundle, " + f"invalid path: {cert_loc}" + ) - conn.cert_reqs = 'CERT_REQUIRED' + conn.cert_reqs = "CERT_REQUIRED" if not os.path.isdir(cert_loc): conn.ca_certs = cert_loc else: conn.ca_cert_dir = cert_loc else: - conn.cert_reqs = 'CERT_NONE' + conn.cert_reqs = "CERT_NONE" conn.ca_certs = None conn.ca_cert_dir = None @@ -246,11 +282,14 @@ class HTTPAdapter(BaseAdapter): conn.cert_file = cert conn.key_file = None if conn.cert_file and not os.path.exists(conn.cert_file): - raise IOError("Could not find the TLS certificate file, " - "invalid path: {}".format(conn.cert_file)) + raise OSError( + f"Could not find the TLS certificate file, " + f"invalid path: {conn.cert_file}" + ) if conn.key_file and not os.path.exists(conn.key_file): - raise IOError("Could not find the TLS key file, " - "invalid path: {}".format(conn.key_file)) + raise OSError( + f"Could not find the TLS key file, invalid path: {conn.key_file}" + ) def build_response(self, req, resp): """Builds a :class:`Response <requests.Response>` object from a urllib3 @@ -265,10 +304,10 @@ class HTTPAdapter(BaseAdapter): response = Response() # Fallback to None if there's no status_code, for whatever reason. - response.status_code = getattr(resp, 'status', None) + response.status_code = getattr(resp, "status", None) # Make headers case-insensitive. - response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {})) + response.headers = CaseInsensitiveDict(getattr(resp, "headers", {})) # Set encoding. response.encoding = get_encoding_from_headers(response.headers) @@ -276,7 +315,7 @@ class HTTPAdapter(BaseAdapter): response.reason = response.raw.reason if isinstance(req.url, bytes): - response.url = req.url.decode('utf-8') + response.url = req.url.decode("utf-8") else: response.url = req.url @@ -301,11 +340,13 @@ class HTTPAdapter(BaseAdapter): proxy = select_proxy(url, proxies) if proxy: - proxy = prepend_scheme_if_needed(proxy, 'http') + proxy = prepend_scheme_if_needed(proxy, "http") proxy_url = parse_url(proxy) if not proxy_url.host: - raise InvalidProxyURL("Please check proxy URL. It is malformed" - " and could be missing the host.") + raise InvalidProxyURL( + "Please check proxy URL. It is malformed " + "and could be missing the host." 
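Note on the `cert_verify` hunk above: it maps the public `verify`/`cert` keyword arguments onto urllib3 connection attributes. `True` resolves to the packaged CA bundle, a string is treated as a bundle file (`conn.ca_certs`) or directory (`conn.ca_cert_dir`), and anything falsy sets `CERT_NONE`. A sketch using the public requests API for illustration; the URLs and bundle path are placeholders, not part of the diff:

import requests

# verify=True (the default): cert_reqs becomes CERT_REQUIRED and the
# packaged CA bundle is used.
r = requests.get("https://example.org", timeout=10)
print(r.status_code)

# verify=<str>: a file path becomes conn.ca_certs, a directory path
# becomes conn.ca_cert_dir.
# r = requests.get("https://internal.example", verify="/etc/ssl/corp-ca.pem")

# verify=False: cert_reqs is set to CERT_NONE and no bundle is consulted.
# r = requests.get("https://self-signed.example", verify=False)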
+ ) proxy_manager = self.proxy_manager_for(proxy) conn = proxy_manager.connection_from_url(url) else: @@ -343,11 +384,11 @@ class HTTPAdapter(BaseAdapter): proxy = select_proxy(request.url, proxies) scheme = urlparse(request.url).scheme - is_proxied_http_request = (proxy and scheme != 'https') + is_proxied_http_request = proxy and scheme != "https" using_socks_proxy = False if proxy: proxy_scheme = urlparse(proxy).scheme.lower() - using_socks_proxy = proxy_scheme.startswith('socks') + using_socks_proxy = proxy_scheme.startswith("socks") url = request.path_url if is_proxied_http_request and not using_socks_proxy: @@ -386,12 +427,13 @@ class HTTPAdapter(BaseAdapter): username, password = get_auth_from_url(proxy) if username: - headers['Proxy-Authorization'] = _basic_auth_str(username, - password) + headers["Proxy-Authorization"] = _basic_auth_str(username, password) return headers - def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None): + def send( + self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None + ): """Sends PreparedRequest object. Returns Response object. :param request: The :class:`PreparedRequest <PreparedRequest>` being sent. @@ -415,86 +457,47 @@ class HTTPAdapter(BaseAdapter): self.cert_verify(conn, request.url, verify, cert) url = self.request_url(request, proxies) - self.add_headers(request, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies) + self.add_headers( + request, + stream=stream, + timeout=timeout, + verify=verify, + cert=cert, + proxies=proxies, + ) - chunked = not (request.body is None or 'Content-Length' in request.headers) + chunked = not (request.body is None or "Content-Length" in request.headers) if isinstance(timeout, tuple): try: connect, read = timeout timeout = TimeoutSauce(connect=connect, read=read) - except ValueError as e: - # this may raise a string formatting error. - err = ("Invalid timeout {}. Pass a (connect, read) " - "timeout tuple, or a single float to set " - "both timeouts to the same value".format(timeout)) - raise ValueError(err) + except ValueError: + raise ValueError( + f"Invalid timeout {timeout}. Pass a (connect, read) timeout tuple, " + f"or a single float to set both timeouts to the same value." + ) elif isinstance(timeout, TimeoutSauce): pass else: timeout = TimeoutSauce(connect=timeout, read=timeout) try: - if not chunked: - resp = conn.urlopen( - method=request.method, - url=url, - body=request.body, - headers=request.headers, - redirect=False, - assert_same_host=False, - preload_content=False, - decode_content=False, - retries=self.max_retries, - timeout=timeout - ) + resp = conn.urlopen( + method=request.method, + url=url, + body=request.body, + headers=request.headers, + redirect=False, + assert_same_host=False, + preload_content=False, + decode_content=False, + retries=self.max_retries, + timeout=timeout, + chunked=chunked, + ) - # Send the request. 
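Two behavioural points from the `send()` hunk above: the `timeout` argument is normalized into a urllib3 `Timeout` (a `(connect, read)` tuple sets each phase separately, a single number sets both), and chunked transfer is now delegated to `conn.urlopen(chunked=...)` in place of the hand-rolled httplib loop removed just below. A usage sketch against a placeholder URL:

import requests

# (connect, read): fail fast on connect, allow a slower body read.
requests.get("https://example.org", timeout=(3.05, 27))

# A single float applies to both phases.
requests.get("https://example.org", timeout=5)

# Anything that cannot unpack into exactly two values raises the
# ValueError shown in the hunk, e.g. timeout=(1, 2, 3).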
- else: - if hasattr(conn, 'proxy_pool'): - conn = conn.proxy_pool - - low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT) - - try: - low_conn.putrequest(request.method, - url, - skip_accept_encoding=True) - - for header, value in request.headers.items(): - low_conn.putheader(header, value) - - low_conn.endheaders() - - for i in request.body: - low_conn.send(hex(len(i))[2:].encode('utf-8')) - low_conn.send(b'\r\n') - low_conn.send(i) - low_conn.send(b'\r\n') - low_conn.send(b'0\r\n\r\n') - - # Receive the response from the server - try: - # For Python 2.7, use buffering of HTTP responses - r = low_conn.getresponse(buffering=True) - except TypeError: - # For compatibility with Python 3.3+ - r = low_conn.getresponse() - - resp = HTTPResponse.from_httplib( - r, - pool=conn, - connection=low_conn, - preload_content=False, - decode_content=False - ) - except: - # If we hit any problems here, clean up the connection. - # Then, reraise so that we can handle the actual exception. - low_conn.close() - raise - - except (ProtocolError, socket.error) as err: + except (ProtocolError, OSError) as err: raise ConnectionError(err, request=request) except MaxRetryError as e: @@ -527,6 +530,8 @@ class HTTPAdapter(BaseAdapter): raise SSLError(e, request=request) elif isinstance(e, ReadTimeoutError): raise ReadTimeout(e, request=request) + elif isinstance(e, _InvalidHeader): + raise InvalidHeader(e, request=request) else: raise diff --git a/env/Lib/site-packages/pip/_vendor/requests/api.py b/env/Lib/site-packages/pip/_vendor/requests/api.py index e978e2031186f656bd96fc296773ec42572053d3..cd0b3eeac3ebca7fe4a627ba5a96c1bbaf827d4f 100644 --- a/env/Lib/site-packages/pip/_vendor/requests/api.py +++ b/env/Lib/site-packages/pip/_vendor/requests/api.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - """ requests.api ~~~~~~~~~~~~ @@ -72,8 +70,7 @@ def get(url, params=None, **kwargs): :rtype: requests.Response """ - kwargs.setdefault('allow_redirects', True) - return request('get', url, params=params, **kwargs) + return request("get", url, params=params, **kwargs) def options(url, **kwargs): @@ -85,8 +82,7 @@ def options(url, **kwargs): :rtype: requests.Response """ - kwargs.setdefault('allow_redirects', True) - return request('options', url, **kwargs) + return request("options", url, **kwargs) def head(url, **kwargs): @@ -100,8 +96,8 @@ def head(url, **kwargs): :rtype: requests.Response """ - kwargs.setdefault('allow_redirects', False) - return request('head', url, **kwargs) + kwargs.setdefault("allow_redirects", False) + return request("head", url, **kwargs) def post(url, data=None, json=None, **kwargs): @@ -110,13 +106,13 @@ def post(url, data=None, json=None, **kwargs): :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. - :param json: (optional) json data to send in the body of the :class:`Request`. + :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response """ - return request('post', url, data=data, json=json, **kwargs) + return request("post", url, data=data, json=json, **kwargs) def put(url, data=None, **kwargs): @@ -125,13 +121,13 @@ def put(url, data=None, **kwargs): :param url: URL for the new :class:`Request` object. 
:param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. - :param json: (optional) json data to send in the body of the :class:`Request`. + :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response """ - return request('put', url, data=data, **kwargs) + return request("put", url, data=data, **kwargs) def patch(url, data=None, **kwargs): @@ -140,13 +136,13 @@ def patch(url, data=None, **kwargs): :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. - :param json: (optional) json data to send in the body of the :class:`Request`. + :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response """ - return request('patch', url, data=data, **kwargs) + return request("patch", url, data=data, **kwargs) def delete(url, **kwargs): @@ -158,4 +154,4 @@ def delete(url, **kwargs): :rtype: requests.Response """ - return request('delete', url, **kwargs) + return request("delete", url, **kwargs) diff --git a/env/Lib/site-packages/pip/_vendor/requests/auth.py b/env/Lib/site-packages/pip/_vendor/requests/auth.py index eeface39ae62c3975ff535e6b1f79f2c28fbf888..9733686ddb36b826ead4f4666d42311397fa6fec 100644 --- a/env/Lib/site-packages/pip/_vendor/requests/auth.py +++ b/env/Lib/site-packages/pip/_vendor/requests/auth.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - """ requests.auth ~~~~~~~~~~~~~ @@ -7,22 +5,21 @@ requests.auth This module contains the authentication handlers for Requests. 
""" +import hashlib import os import re -import time -import hashlib import threading +import time import warnings - from base64 import b64encode -from .compat import urlparse, str, basestring -from .cookies import extract_cookies_to_jar from ._internal_utils import to_native_string +from .compat import basestring, str, urlparse +from .cookies import extract_cookies_to_jar from .utils import parse_dict_header -CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded' -CONTENT_TYPE_MULTI_PART = 'multipart/form-data' +CONTENT_TYPE_FORM_URLENCODED = "application/x-www-form-urlencoded" +CONTENT_TYPE_MULTI_PART = "multipart/form-data" def _basic_auth_str(username, password): @@ -57,23 +54,23 @@ def _basic_auth_str(username, password): # -- End Removal -- if isinstance(username, str): - username = username.encode('latin1') + username = username.encode("latin1") if isinstance(password, str): - password = password.encode('latin1') + password = password.encode("latin1") - authstr = 'Basic ' + to_native_string( - b64encode(b':'.join((username, password))).strip() + authstr = "Basic " + to_native_string( + b64encode(b":".join((username, password))).strip() ) return authstr -class AuthBase(object): +class AuthBase: """Base class that all auth implementations derive from""" def __call__(self, r): - raise NotImplementedError('Auth hooks must be callable.') + raise NotImplementedError("Auth hooks must be callable.") class HTTPBasicAuth(AuthBase): @@ -84,16 +81,18 @@ class HTTPBasicAuth(AuthBase): self.password = password def __eq__(self, other): - return all([ - self.username == getattr(other, 'username', None), - self.password == getattr(other, 'password', None) - ]) + return all( + [ + self.username == getattr(other, "username", None), + self.password == getattr(other, "password", None), + ] + ) def __ne__(self, other): return not self == other def __call__(self, r): - r.headers['Authorization'] = _basic_auth_str(self.username, self.password) + r.headers["Authorization"] = _basic_auth_str(self.username, self.password) return r @@ -101,7 +100,7 @@ class HTTPProxyAuth(HTTPBasicAuth): """Attaches HTTP Proxy Authentication to a given Request object.""" def __call__(self, r): - r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password) + r.headers["Proxy-Authorization"] = _basic_auth_str(self.username, self.password) return r @@ -116,9 +115,9 @@ class HTTPDigestAuth(AuthBase): def init_per_thread_state(self): # Ensure state is initialized just once per-thread - if not hasattr(self._thread_local, 'init'): + if not hasattr(self._thread_local, "init"): self._thread_local.init = True - self._thread_local.last_nonce = '' + self._thread_local.last_nonce = "" self._thread_local.nonce_count = 0 self._thread_local.chal = {} self._thread_local.pos = None @@ -129,44 +128,52 @@ class HTTPDigestAuth(AuthBase): :rtype: str """ - realm = self._thread_local.chal['realm'] - nonce = self._thread_local.chal['nonce'] - qop = self._thread_local.chal.get('qop') - algorithm = self._thread_local.chal.get('algorithm') - opaque = self._thread_local.chal.get('opaque') + realm = self._thread_local.chal["realm"] + nonce = self._thread_local.chal["nonce"] + qop = self._thread_local.chal.get("qop") + algorithm = self._thread_local.chal.get("algorithm") + opaque = self._thread_local.chal.get("opaque") hash_utf8 = None if algorithm is None: - _algorithm = 'MD5' + _algorithm = "MD5" else: _algorithm = algorithm.upper() # lambdas assume digest modules are imported at the top level - if _algorithm == 'MD5' or 
_algorithm == 'MD5-SESS': + if _algorithm == "MD5" or _algorithm == "MD5-SESS": + def md5_utf8(x): if isinstance(x, str): - x = x.encode('utf-8') + x = x.encode("utf-8") return hashlib.md5(x).hexdigest() + hash_utf8 = md5_utf8 - elif _algorithm == 'SHA': + elif _algorithm == "SHA": + def sha_utf8(x): if isinstance(x, str): - x = x.encode('utf-8') + x = x.encode("utf-8") return hashlib.sha1(x).hexdigest() + hash_utf8 = sha_utf8 - elif _algorithm == 'SHA-256': + elif _algorithm == "SHA-256": + def sha256_utf8(x): if isinstance(x, str): - x = x.encode('utf-8') + x = x.encode("utf-8") return hashlib.sha256(x).hexdigest() + hash_utf8 = sha256_utf8 - elif _algorithm == 'SHA-512': + elif _algorithm == "SHA-512": + def sha512_utf8(x): if isinstance(x, str): - x = x.encode('utf-8') + x = x.encode("utf-8") return hashlib.sha512(x).hexdigest() + hash_utf8 = sha512_utf8 - KD = lambda s, d: hash_utf8("%s:%s" % (s, d)) + KD = lambda s, d: hash_utf8(f"{s}:{d}") # noqa:E731 if hash_utf8 is None: return None @@ -177,10 +184,10 @@ class HTTPDigestAuth(AuthBase): #: path is request-uri defined in RFC 2616 which should not be empty path = p_parsed.path or "/" if p_parsed.query: - path += '?' + p_parsed.query + path += f"?{p_parsed.query}" - A1 = '%s:%s:%s' % (self.username, realm, self.password) - A2 = '%s:%s' % (method, path) + A1 = f"{self.username}:{realm}:{self.password}" + A2 = f"{method}:{path}" HA1 = hash_utf8(A1) HA2 = hash_utf8(A2) @@ -189,22 +196,20 @@ class HTTPDigestAuth(AuthBase): self._thread_local.nonce_count += 1 else: self._thread_local.nonce_count = 1 - ncvalue = '%08x' % self._thread_local.nonce_count - s = str(self._thread_local.nonce_count).encode('utf-8') - s += nonce.encode('utf-8') - s += time.ctime().encode('utf-8') + ncvalue = f"{self._thread_local.nonce_count:08x}" + s = str(self._thread_local.nonce_count).encode("utf-8") + s += nonce.encode("utf-8") + s += time.ctime().encode("utf-8") s += os.urandom(8) - cnonce = (hashlib.sha1(s).hexdigest()[:16]) - if _algorithm == 'MD5-SESS': - HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce)) + cnonce = hashlib.sha1(s).hexdigest()[:16] + if _algorithm == "MD5-SESS": + HA1 = hash_utf8(f"{HA1}:{nonce}:{cnonce}") if not qop: - respdig = KD(HA1, "%s:%s" % (nonce, HA2)) - elif qop == 'auth' or 'auth' in qop.split(','): - noncebit = "%s:%s:%s:%s:%s" % ( - nonce, ncvalue, cnonce, 'auth', HA2 - ) + respdig = KD(HA1, f"{nonce}:{HA2}") + elif qop == "auth" or "auth" in qop.split(","): + noncebit = f"{nonce}:{ncvalue}:{cnonce}:auth:{HA2}" respdig = KD(HA1, noncebit) else: # XXX handle auth-int. @@ -213,18 +218,20 @@ class HTTPDigestAuth(AuthBase): self._thread_local.last_nonce = nonce # XXX should the partial digests be encoded too? 
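The branches above select the hash helper from the challenge's `algorithm` field (MD5, MD5-SESS, SHA, SHA-256, SHA-512); callers never touch them directly and only supply credentials. A short usage sketch against httpbin's public digest-auth test endpoint (any server issuing a `WWW-Authenticate: Digest` challenge behaves the same way):

import requests
from requests.auth import HTTPDigestAuth

# The first request draws a 401 challenge; the 401 handler shown in the
# following hunk re-sends it with the header from build_digest_header.
r = requests.get(
    "https://httpbin.org/digest-auth/auth/user/passwd",
    auth=HTTPDigestAuth("user", "passwd"),
)
print(r.status_code)  # 200 once the digest response is accepted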
- base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \ - 'response="%s"' % (self.username, realm, nonce, path, respdig) + base = ( + f'username="{self.username}", realm="{realm}", nonce="{nonce}", ' + f'uri="{path}", response="{respdig}"' + ) if opaque: - base += ', opaque="%s"' % opaque + base += f', opaque="{opaque}"' if algorithm: - base += ', algorithm="%s"' % algorithm + base += f', algorithm="{algorithm}"' if entdig: - base += ', digest="%s"' % entdig + base += f', digest="{entdig}"' if qop: - base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce) + base += f', qop="auth", nc={ncvalue}, cnonce="{cnonce}"' - return 'Digest %s' % (base) + return f"Digest {base}" def handle_redirect(self, r, **kwargs): """Reset num_401_calls counter on redirects.""" @@ -248,13 +255,13 @@ class HTTPDigestAuth(AuthBase): # Rewind the file position indicator of the body to where # it was to resend the request. r.request.body.seek(self._thread_local.pos) - s_auth = r.headers.get('www-authenticate', '') + s_auth = r.headers.get("www-authenticate", "") - if 'digest' in s_auth.lower() and self._thread_local.num_401_calls < 2: + if "digest" in s_auth.lower() and self._thread_local.num_401_calls < 2: self._thread_local.num_401_calls += 1 - pat = re.compile(r'digest ', flags=re.IGNORECASE) - self._thread_local.chal = parse_dict_header(pat.sub('', s_auth, count=1)) + pat = re.compile(r"digest ", flags=re.IGNORECASE) + self._thread_local.chal = parse_dict_header(pat.sub("", s_auth, count=1)) # Consume content and release the original connection # to allow our new request to reuse the same one. @@ -264,8 +271,9 @@ class HTTPDigestAuth(AuthBase): extract_cookies_to_jar(prep._cookies, r.request, r.raw) prep.prepare_cookies(prep._cookies) - prep.headers['Authorization'] = self.build_digest_header( - prep.method, prep.url) + prep.headers["Authorization"] = self.build_digest_header( + prep.method, prep.url + ) _r = r.connection.send(prep, **kwargs) _r.history.append(r) _r.request = prep @@ -280,7 +288,7 @@ class HTTPDigestAuth(AuthBase): self.init_per_thread_state() # If we have a saved nonce, skip the 401 if self._thread_local.last_nonce: - r.headers['Authorization'] = self.build_digest_header(r.method, r.url) + r.headers["Authorization"] = self.build_digest_header(r.method, r.url) try: self._thread_local.pos = r.body.tell() except AttributeError: @@ -289,17 +297,19 @@ class HTTPDigestAuth(AuthBase): # file position of the previous body. Ensure it's set to # None. 
self._thread_local.pos = None - r.register_hook('response', self.handle_401) - r.register_hook('response', self.handle_redirect) + r.register_hook("response", self.handle_401) + r.register_hook("response", self.handle_redirect) self._thread_local.num_401_calls = 1 return r def __eq__(self, other): - return all([ - self.username == getattr(other, 'username', None), - self.password == getattr(other, 'password', None) - ]) + return all( + [ + self.username == getattr(other, "username", None), + self.password == getattr(other, "password", None), + ] + ) def __ne__(self, other): return not self == other diff --git a/env/Lib/site-packages/pip/_vendor/requests/certs.py b/env/Lib/site-packages/pip/_vendor/requests/certs.py index 06a594e58f6746041edf371bc3dc8ca42b612322..38696a1fb3419dd810004d5aec9654e5224042ed 100644 --- a/env/Lib/site-packages/pip/_vendor/requests/certs.py +++ b/env/Lib/site-packages/pip/_vendor/requests/certs.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- """ requests.certs @@ -12,7 +11,14 @@ If you are packaging Requests, e.g., for a Linux distribution or a managed environment, you can change the definition of where() to return a separately packaged CA bundle. """ -from pip._vendor.certifi import where -if __name__ == '__main__': +import os + +if "_PIP_STANDALONE_CERT" not in os.environ: + from pip._vendor.certifi import where +else: + def where(): + return os.environ["_PIP_STANDALONE_CERT"] + +if __name__ == "__main__": print(where()) diff --git a/env/Lib/site-packages/pip/_vendor/requests/compat.py b/env/Lib/site-packages/pip/_vendor/requests/compat.py index 9e29371678b91607a46bfe1ac2006c44df04dcc4..9ab2bb48656520a95ec9ac87d090f2e741f0e544 100644 --- a/env/Lib/site-packages/pip/_vendor/requests/compat.py +++ b/env/Lib/site-packages/pip/_vendor/requests/compat.py @@ -1,11 +1,10 @@ -# -*- coding: utf-8 -*- - """ requests.compat ~~~~~~~~~~~~~~~ -This module handles import compatibility issues between Python 2 and -Python 3. +This module previously handled import compatibility issues +between Python 2 and Python 3. It remains for backwards +compatibility until the next major version. """ from pip._vendor import chardet @@ -20,57 +19,49 @@ import sys _ver = sys.version_info #: Python 2.x? -is_py2 = (_ver[0] == 2) +is_py2 = _ver[0] == 2 #: Python 3.x? -is_py3 = (_ver[0] == 3) +is_py3 = _ver[0] == 3 # Note: We've patched out simplejson support in pip because it prevents # upgrading simplejson on Windows. -# try: -# import simplejson as json -# except (ImportError, SyntaxError): -# # simplejson does not support Python 3.2, it throws a SyntaxError -# # because of u'...' Unicode literals. import json +from json import JSONDecodeError -# --------- -# Specifics -# --------- - -if is_py2: - from urllib import ( - quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, - proxy_bypass, proxy_bypass_environment, getproxies_environment) - from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag - from urllib2 import parse_http_list - import cookielib - from Cookie import Morsel - from StringIO import StringIO - # Keep OrderedDict for backwards compatibility. - from collections import Callable, Mapping, MutableMapping, OrderedDict - - - builtin_str = str - bytes = str - str = unicode - basestring = basestring - numeric_types = (int, long, float) - integer_types = (int, long) +# Keep OrderedDict for backwards compatibility. 
+from collections import OrderedDict +from collections.abc import Callable, Mapping, MutableMapping +from http import cookiejar as cookielib +from http.cookies import Morsel +from io import StringIO -elif is_py3: - from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag - from urllib.request import parse_http_list, getproxies, proxy_bypass, proxy_bypass_environment, getproxies_environment - from http import cookiejar as cookielib - from http.cookies import Morsel - from io import StringIO - # Keep OrderedDict for backwards compatibility. - from collections import OrderedDict - from collections.abc import Callable, Mapping, MutableMapping +# -------------- +# Legacy Imports +# -------------- +from urllib.parse import ( + quote, + quote_plus, + unquote, + unquote_plus, + urldefrag, + urlencode, + urljoin, + urlparse, + urlsplit, + urlunparse, +) +from urllib.request import ( + getproxies, + getproxies_environment, + parse_http_list, + proxy_bypass, + proxy_bypass_environment, +) - builtin_str = str - str = str - bytes = bytes - basestring = (str, bytes) - numeric_types = (int, float) - integer_types = (int,) +builtin_str = str +str = str +bytes = bytes +basestring = (str, bytes) +numeric_types = (int, float) +integer_types = (int,) diff --git a/env/Lib/site-packages/pip/_vendor/requests/cookies.py b/env/Lib/site-packages/pip/_vendor/requests/cookies.py index 56fccd9c2570d2a31365ed11278fd1b6ecc2aa54..bf54ab237e410603061b8cec8fd195912d3cfb08 100644 --- a/env/Lib/site-packages/pip/_vendor/requests/cookies.py +++ b/env/Lib/site-packages/pip/_vendor/requests/cookies.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - """ requests.cookies ~~~~~~~~~~~~~~~~ @@ -9,12 +7,12 @@ Compatibility code to be able to use `cookielib.CookieJar` with requests. requests.utils imports from here, so be careful with imports. """ +import calendar import copy import time -import calendar from ._internal_utils import to_native_string -from .compat import cookielib, urlparse, urlunparse, Morsel, MutableMapping +from .compat import Morsel, MutableMapping, cookielib, urlparse, urlunparse try: import threading @@ -22,7 +20,7 @@ except ImportError: import dummy_threading as threading -class MockRequest(object): +class MockRequest: """Wraps a `requests.Request` to mimic a `urllib2.Request`. 
The code in `cookielib.CookieJar` expects this interface in order to correctly @@ -51,16 +49,22 @@ class MockRequest(object): def get_full_url(self): # Only return the response's URL if the user hadn't set the Host # header - if not self._r.headers.get('Host'): + if not self._r.headers.get("Host"): return self._r.url # If they did set it, retrieve it and reconstruct the expected domain - host = to_native_string(self._r.headers['Host'], encoding='utf-8') + host = to_native_string(self._r.headers["Host"], encoding="utf-8") parsed = urlparse(self._r.url) # Reconstruct the URL as we expect it - return urlunparse([ - parsed.scheme, host, parsed.path, parsed.params, parsed.query, - parsed.fragment - ]) + return urlunparse( + [ + parsed.scheme, + host, + parsed.path, + parsed.params, + parsed.query, + parsed.fragment, + ] + ) def is_unverifiable(self): return True @@ -73,7 +77,9 @@ class MockRequest(object): def add_header(self, key, val): """cookielib has no legitimate use for this method; add it back if you find one.""" - raise NotImplementedError("Cookie headers should be added with add_unredirected_header()") + raise NotImplementedError( + "Cookie headers should be added with add_unredirected_header()" + ) def add_unredirected_header(self, name, value): self._new_headers[name] = value @@ -94,7 +100,7 @@ class MockRequest(object): return self.get_host() -class MockResponse(object): +class MockResponse: """Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`. ...what? Basically, expose the parsed HTTP headers from the server response @@ -122,8 +128,7 @@ def extract_cookies_to_jar(jar, request, response): :param request: our own requests.Request object :param response: urllib3.HTTPResponse object """ - if not (hasattr(response, '_original_response') and - response._original_response): + if not (hasattr(response, "_original_response") and response._original_response): return # the _original_response field is the wrapped httplib.HTTPResponse object, req = MockRequest(request) @@ -140,7 +145,7 @@ def get_cookie_header(jar, request): """ r = MockRequest(request) jar.add_cookie_header(r) - return r.get_new_headers().get('Cookie') + return r.get_new_headers().get("Cookie") def remove_cookie_by_name(cookiejar, name, domain=None, path=None): @@ -205,7 +210,9 @@ class RequestsCookieJar(cookielib.CookieJar, MutableMapping): """ # support client code that unsets cookies by assignment of a None value: if value is None: - remove_cookie_by_name(self, name, domain=kwargs.get('domain'), path=kwargs.get('path')) + remove_cookie_by_name( + self, name, domain=kwargs.get("domain"), path=kwargs.get("path") + ) return if isinstance(value, Morsel): @@ -305,16 +312,15 @@ class RequestsCookieJar(cookielib.CookieJar, MutableMapping): """ dictionary = {} for cookie in iter(self): - if ( - (domain is None or cookie.domain == domain) and - (path is None or cookie.path == path) + if (domain is None or cookie.domain == domain) and ( + path is None or cookie.path == path ): dictionary[cookie.name] = cookie.value return dictionary def __contains__(self, name): try: - return super(RequestsCookieJar, self).__contains__(name) + return super().__contains__(name) except CookieConflictError: return True @@ -341,9 +347,13 @@ class RequestsCookieJar(cookielib.CookieJar, MutableMapping): remove_cookie_by_name(self, name) def set_cookie(self, cookie, *args, **kwargs): - if hasattr(cookie.value, 'startswith') and cookie.value.startswith('"') and cookie.value.endswith('"'): - cookie.value = cookie.value.replace('\\"', '') - return 
super(RequestsCookieJar, self).set_cookie(cookie, *args, **kwargs) + if ( + hasattr(cookie.value, "startswith") + and cookie.value.startswith('"') + and cookie.value.endswith('"') + ): + cookie.value = cookie.value.replace('\\"', "") + return super().set_cookie(cookie, *args, **kwargs) def update(self, other): """Updates this jar with cookies from another CookieJar or dict-like""" @@ -351,7 +361,7 @@ class RequestsCookieJar(cookielib.CookieJar, MutableMapping): for cookie in other: self.set_cookie(copy.copy(cookie)) else: - super(RequestsCookieJar, self).update(other) + super().update(other) def _find(self, name, domain=None, path=None): """Requests uses this method internally to get cookie values. @@ -371,7 +381,7 @@ class RequestsCookieJar(cookielib.CookieJar, MutableMapping): if path is None or cookie.path == path: return cookie.value - raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path)) + raise KeyError(f"name={name!r}, domain={domain!r}, path={path!r}") def _find_no_duplicates(self, name, domain=None, path=None): """Both ``__get_item__`` and ``get`` call this function: it's never @@ -390,25 +400,29 @@ class RequestsCookieJar(cookielib.CookieJar, MutableMapping): if cookie.name == name: if domain is None or cookie.domain == domain: if path is None or cookie.path == path: - if toReturn is not None: # if there are multiple cookies that meet passed in criteria - raise CookieConflictError('There are multiple cookies with name, %r' % (name)) - toReturn = cookie.value # we will eventually return this as long as no cookie conflict + if toReturn is not None: + # if there are multiple cookies that meet passed in criteria + raise CookieConflictError( + f"There are multiple cookies with name, {name!r}" + ) + # we will eventually return this as long as no cookie conflict + toReturn = cookie.value if toReturn: return toReturn - raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path)) + raise KeyError(f"name={name!r}, domain={domain!r}, path={path!r}") def __getstate__(self): """Unlike a normal CookieJar, this class is pickleable.""" state = self.__dict__.copy() # remove the unpickleable RLock object - state.pop('_cookies_lock') + state.pop("_cookies_lock") return state def __setstate__(self, state): """Unlike a normal CookieJar, this class is pickleable.""" self.__dict__.update(state) - if '_cookies_lock' not in self.__dict__: + if "_cookies_lock" not in self.__dict__: self._cookies_lock = threading.RLock() def copy(self): @@ -427,7 +441,7 @@ def _copy_cookie_jar(jar): if jar is None: return None - if hasattr(jar, 'copy'): + if hasattr(jar, "copy"): # We're dealing with an instance of RequestsCookieJar return jar.copy() # We're dealing with a generic CookieJar instance @@ -445,31 +459,32 @@ def create_cookie(name, value, **kwargs): and sent on every request (this is sometimes called a "supercookie"). 
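RequestsCookieJar, updated in the hunks above, stays usable both as a `cookielib.CookieJar` and as a plain mapping; `set()` fills in the permissive defaults (domain `''`, path `'/'`) that the `create_cookie` docstring continuing below warns about. A small sketch:

from requests.cookies import RequestsCookieJar

jar = RequestsCookieJar()
# Scope the cookie explicitly; omitting domain/path would produce the
# send-everywhere "supercookie" described in the docstring.
jar.set("session", "abc123", domain="example.org", path="/")

print(jar["session"])                      # 'abc123', via _find_no_duplicates
print(jar.get_dict(domain="example.org"))  # {'session': 'abc123'}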
""" result = { - 'version': 0, - 'name': name, - 'value': value, - 'port': None, - 'domain': '', - 'path': '/', - 'secure': False, - 'expires': None, - 'discard': True, - 'comment': None, - 'comment_url': None, - 'rest': {'HttpOnly': None}, - 'rfc2109': False, + "version": 0, + "name": name, + "value": value, + "port": None, + "domain": "", + "path": "/", + "secure": False, + "expires": None, + "discard": True, + "comment": None, + "comment_url": None, + "rest": {"HttpOnly": None}, + "rfc2109": False, } badargs = set(kwargs) - set(result) if badargs: - err = 'create_cookie() got unexpected keyword arguments: %s' - raise TypeError(err % list(badargs)) + raise TypeError( + f"create_cookie() got unexpected keyword arguments: {list(badargs)}" + ) result.update(kwargs) - result['port_specified'] = bool(result['port']) - result['domain_specified'] = bool(result['domain']) - result['domain_initial_dot'] = result['domain'].startswith('.') - result['path_specified'] = bool(result['path']) + result["port_specified"] = bool(result["port"]) + result["domain_specified"] = bool(result["domain"]) + result["domain_initial_dot"] = result["domain"].startswith(".") + result["path_specified"] = bool(result["path"]) return cookielib.Cookie(**result) @@ -478,30 +493,28 @@ def morsel_to_cookie(morsel): """Convert a Morsel object into a Cookie containing the one k/v pair.""" expires = None - if morsel['max-age']: + if morsel["max-age"]: try: - expires = int(time.time() + int(morsel['max-age'])) + expires = int(time.time() + int(morsel["max-age"])) except ValueError: - raise TypeError('max-age: %s must be integer' % morsel['max-age']) - elif morsel['expires']: - time_template = '%a, %d-%b-%Y %H:%M:%S GMT' - expires = calendar.timegm( - time.strptime(morsel['expires'], time_template) - ) + raise TypeError(f"max-age: {morsel['max-age']} must be integer") + elif morsel["expires"]: + time_template = "%a, %d-%b-%Y %H:%M:%S GMT" + expires = calendar.timegm(time.strptime(morsel["expires"], time_template)) return create_cookie( - comment=morsel['comment'], - comment_url=bool(morsel['comment']), + comment=morsel["comment"], + comment_url=bool(morsel["comment"]), discard=False, - domain=morsel['domain'], + domain=morsel["domain"], expires=expires, name=morsel.key, - path=morsel['path'], + path=morsel["path"], port=None, - rest={'HttpOnly': morsel['httponly']}, + rest={"HttpOnly": morsel["httponly"]}, rfc2109=False, - secure=bool(morsel['secure']), + secure=bool(morsel["secure"]), value=morsel.value, - version=morsel['version'] or 0, + version=morsel["version"] or 0, ) @@ -534,11 +547,10 @@ def merge_cookies(cookiejar, cookies): :rtype: CookieJar """ if not isinstance(cookiejar, cookielib.CookieJar): - raise ValueError('You can only merge into CookieJar') + raise ValueError("You can only merge into CookieJar") if isinstance(cookies, dict): - cookiejar = cookiejar_from_dict( - cookies, cookiejar=cookiejar, overwrite=False) + cookiejar = cookiejar_from_dict(cookies, cookiejar=cookiejar, overwrite=False) elif isinstance(cookies, cookielib.CookieJar): try: cookiejar.update(cookies) diff --git a/env/Lib/site-packages/pip/_vendor/requests/exceptions.py b/env/Lib/site-packages/pip/_vendor/requests/exceptions.py index 9ef9e6e97b81ec5559b572cbc8e4d3b80cc36bce..168d07390dfc366102b8197e4b271e493bd94d11 100644 --- a/env/Lib/site-packages/pip/_vendor/requests/exceptions.py +++ b/env/Lib/site-packages/pip/_vendor/requests/exceptions.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - """ requests.exceptions ~~~~~~~~~~~~~~~~~~~ @@ -8,6 +6,8 @@ 
This module contains the set of Requests' exceptions. """ from pip._vendor.urllib3.exceptions import HTTPError as BaseHTTPError +from .compat import JSONDecodeError as CompatJSONDecodeError + class RequestException(IOError): """There was an ambiguous exception that occurred while handling your @@ -16,13 +16,30 @@ class RequestException(IOError): def __init__(self, *args, **kwargs): """Initialize RequestException with `request` and `response` objects.""" - response = kwargs.pop('response', None) + response = kwargs.pop("response", None) self.response = response - self.request = kwargs.pop('request', None) - if (response is not None and not self.request and - hasattr(response, 'request')): + self.request = kwargs.pop("request", None) + if response is not None and not self.request and hasattr(response, "request"): self.request = self.response.request - super(RequestException, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) + + +class InvalidJSONError(RequestException): + """A JSON error occurred.""" + + +class JSONDecodeError(InvalidJSONError, CompatJSONDecodeError): + """Couldn't decode the text into json""" + + def __init__(self, *args, **kwargs): + """ + Construct the JSONDecodeError instance first with all + args. Then use it's args to construct the IOError so that + the json specific args aren't used as IOError specific args + and the error message from JSONDecodeError is preserved. + """ + CompatJSONDecodeError.__init__(self, *args) + InvalidJSONError.__init__(self, *self.args, **kwargs) class HTTPError(RequestException): @@ -70,11 +87,11 @@ class TooManyRedirects(RequestException): class MissingSchema(RequestException, ValueError): - """The URL schema (e.g. http or https) is missing.""" + """The URL scheme (e.g. http or https) is missing.""" class InvalidSchema(RequestException, ValueError): - """See defaults.py for valid schemas.""" + """The URL scheme provided is either invalid or unsupported.""" class InvalidURL(RequestException, ValueError): @@ -108,6 +125,7 @@ class RetryError(RequestException): class UnrewindableBodyError(RequestException): """Requests encountered an error when trying to rewind a body.""" + # Warnings diff --git a/env/Lib/site-packages/pip/_vendor/requests/help.py b/env/Lib/site-packages/pip/_vendor/requests/help.py index 3c3072ba1419b4e0f183c5de1d0c39d166f2472c..2d292c2f062cd80cd108aac503eae7b635ceec8d 100644 --- a/env/Lib/site-packages/pip/_vendor/requests/help.py +++ b/env/Lib/site-packages/pip/_vendor/requests/help.py @@ -1,17 +1,22 @@ """Module containing bug report helper(s).""" -from __future__ import print_function import json import platform -import sys import ssl +import sys from pip._vendor import idna from pip._vendor import urllib3 -from pip._vendor import chardet from . import __version__ as requests_version +charset_normalizer = None + +try: + from pip._vendor import chardet +except ImportError: + chardet = None + try: from pip._vendor.urllib3.contrib import pyopenssl except ImportError: @@ -19,16 +24,16 @@ except ImportError: OpenSSL = None cryptography = None else: - import OpenSSL import cryptography + import OpenSSL def _implementation(): """Return a dict with the Python implementation and version. Provide both the name and the version of the Python implementation - currently running. For example, on CPython 2.7.5 it will return - {'name': 'CPython', 'version': '2.7.5'}. + currently running. For example, on CPython 3.10.3 it will return + {'name': 'CPython', 'version': '3.10.3'}. 
This function works best on CPython and PyPy: in particular, it probably doesn't work for Jython or IronPython. Future investigation should be done @@ -36,76 +41,83 @@ def _implementation(): """ implementation = platform.python_implementation() - if implementation == 'CPython': + if implementation == "CPython": implementation_version = platform.python_version() - elif implementation == 'PyPy': - implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major, - sys.pypy_version_info.minor, - sys.pypy_version_info.micro) - if sys.pypy_version_info.releaselevel != 'final': - implementation_version = ''.join([ - implementation_version, sys.pypy_version_info.releaselevel - ]) - elif implementation == 'Jython': + elif implementation == "PyPy": + implementation_version = "{}.{}.{}".format( + sys.pypy_version_info.major, + sys.pypy_version_info.minor, + sys.pypy_version_info.micro, + ) + if sys.pypy_version_info.releaselevel != "final": + implementation_version = "".join( + [implementation_version, sys.pypy_version_info.releaselevel] + ) + elif implementation == "Jython": implementation_version = platform.python_version() # Complete Guess - elif implementation == 'IronPython': + elif implementation == "IronPython": implementation_version = platform.python_version() # Complete Guess else: - implementation_version = 'Unknown' + implementation_version = "Unknown" - return {'name': implementation, 'version': implementation_version} + return {"name": implementation, "version": implementation_version} def info(): """Generate information for a bug report.""" try: platform_info = { - 'system': platform.system(), - 'release': platform.release(), + "system": platform.system(), + "release": platform.release(), } - except IOError: + except OSError: platform_info = { - 'system': 'Unknown', - 'release': 'Unknown', + "system": "Unknown", + "release": "Unknown", } implementation_info = _implementation() - urllib3_info = {'version': urllib3.__version__} - chardet_info = {'version': chardet.__version__} + urllib3_info = {"version": urllib3.__version__} + charset_normalizer_info = {"version": None} + chardet_info = {"version": None} + if charset_normalizer: + charset_normalizer_info = {"version": charset_normalizer.__version__} + if chardet: + chardet_info = {"version": chardet.__version__} pyopenssl_info = { - 'version': None, - 'openssl_version': '', + "version": None, + "openssl_version": "", } if OpenSSL: pyopenssl_info = { - 'version': OpenSSL.__version__, - 'openssl_version': '%x' % OpenSSL.SSL.OPENSSL_VERSION_NUMBER, + "version": OpenSSL.__version__, + "openssl_version": f"{OpenSSL.SSL.OPENSSL_VERSION_NUMBER:x}", } cryptography_info = { - 'version': getattr(cryptography, '__version__', ''), + "version": getattr(cryptography, "__version__", ""), } idna_info = { - 'version': getattr(idna, '__version__', ''), + "version": getattr(idna, "__version__", ""), } system_ssl = ssl.OPENSSL_VERSION_NUMBER - system_ssl_info = { - 'version': '%x' % system_ssl if system_ssl is not None else '' - } + system_ssl_info = {"version": f"{system_ssl:x}" if system_ssl is not None else ""} return { - 'platform': platform_info, - 'implementation': implementation_info, - 'system_ssl': system_ssl_info, - 'using_pyopenssl': pyopenssl is not None, - 'pyOpenSSL': pyopenssl_info, - 'urllib3': urllib3_info, - 'chardet': chardet_info, - 'cryptography': cryptography_info, - 'idna': idna_info, - 'requests': { - 'version': requests_version, + "platform": platform_info, + "implementation": implementation_info, + "system_ssl": 
system_ssl_info, + "using_pyopenssl": pyopenssl is not None, + "using_charset_normalizer": chardet is None, + "pyOpenSSL": pyopenssl_info, + "urllib3": urllib3_info, + "chardet": chardet_info, + "charset_normalizer": charset_normalizer_info, + "cryptography": cryptography_info, + "idna": idna_info, + "requests": { + "version": requests_version, }, } @@ -115,5 +127,5 @@ def main(): print(json.dumps(info(), sort_keys=True, indent=2)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/env/Lib/site-packages/pip/_vendor/requests/hooks.py b/env/Lib/site-packages/pip/_vendor/requests/hooks.py index 7a51f212c8a08fc6a04e509e89d8dcc85185df77..d181ba2ec2e55d274897315887b78fbdca757da8 100644 --- a/env/Lib/site-packages/pip/_vendor/requests/hooks.py +++ b/env/Lib/site-packages/pip/_vendor/requests/hooks.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - """ requests.hooks ~~~~~~~~~~~~~~ @@ -11,12 +9,13 @@ Available hooks: ``response``: The response generated from a Request. """ -HOOKS = ['response'] +HOOKS = ["response"] def default_hooks(): return {event: [] for event in HOOKS} + # TODO: response is the only one @@ -25,7 +24,7 @@ def dispatch_hook(key, hooks, hook_data, **kwargs): hooks = hooks or {} hooks = hooks.get(key) if hooks: - if hasattr(hooks, '__call__'): + if hasattr(hooks, "__call__"): hooks = [hooks] for hook in hooks: _hook_data = hook(hook_data, **kwargs) diff --git a/env/Lib/site-packages/pip/_vendor/requests/models.py b/env/Lib/site-packages/pip/_vendor/requests/models.py index b0ce2950f255e7ee1e223be9a27bba635801ce08..76e6f199c0042cec6500f53c062ff9ea1033e79d 100644 --- a/env/Lib/site-packages/pip/_vendor/requests/models.py +++ b/env/Lib/site-packages/pip/_vendor/requests/models.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - """ requests.models ~~~~~~~~~~~~~~~ @@ -8,46 +6,72 @@ This module contains the primary objects that power Requests. """ import datetime -import sys # Import encoding now, to avoid implicit import later. # Implicit import within threads may cause LookupError when standard library is in a ZIP, # such as in Embedded Python. See https://github.com/psf/requests/issues/3578. 
-import encodings.idna +import encodings.idna # noqa: F401 +from io import UnsupportedOperation +from pip._vendor.urllib3.exceptions import ( + DecodeError, + LocationParseError, + ProtocolError, + ReadTimeoutError, + SSLError, +) from pip._vendor.urllib3.fields import RequestField from pip._vendor.urllib3.filepost import encode_multipart_formdata from pip._vendor.urllib3.util import parse_url -from pip._vendor.urllib3.exceptions import ( - DecodeError, ReadTimeoutError, ProtocolError, LocationParseError) -from io import UnsupportedOperation -from .hooks import default_hooks -from .structures import CaseInsensitiveDict - -from .auth import HTTPBasicAuth -from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar -from .exceptions import ( - HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError, - ContentDecodingError, ConnectionError, StreamConsumedError) from ._internal_utils import to_native_string, unicode_is_ascii -from .utils import ( - guess_filename, get_auth_from_url, requote_uri, - stream_decode_response_unicode, to_key_val_list, parse_header_links, - iter_slices, guess_json_utf, super_len, check_header_validity) +from .auth import HTTPBasicAuth from .compat import ( - Callable, Mapping, - cookielib, urlunparse, urlsplit, urlencode, str, bytes, - is_py2, chardet, builtin_str, basestring) + Callable, + JSONDecodeError, + Mapping, + basestring, + builtin_str, + chardet, + cookielib, +) from .compat import json as complexjson +from .compat import urlencode, urlsplit, urlunparse +from .cookies import _copy_cookie_jar, cookiejar_from_dict, get_cookie_header +from .exceptions import ( + ChunkedEncodingError, + ConnectionError, + ContentDecodingError, + HTTPError, + InvalidJSONError, + InvalidURL, +) +from .exceptions import JSONDecodeError as RequestsJSONDecodeError +from .exceptions import MissingSchema +from .exceptions import SSLError as RequestsSSLError +from .exceptions import StreamConsumedError +from .hooks import default_hooks from .status_codes import codes +from .structures import CaseInsensitiveDict +from .utils import ( + check_header_validity, + get_auth_from_url, + guess_filename, + guess_json_utf, + iter_slices, + parse_header_links, + requote_uri, + stream_decode_response_unicode, + super_len, + to_key_val_list, +) #: The set of HTTP status codes that indicate an automatically #: processable redirect. 
REDIRECT_STATI = ( - codes.moved, # 301 - codes.found, # 302 - codes.other, # 303 + codes.moved, # 301 + codes.found, # 302 + codes.other, # 303 codes.temporary_redirect, # 307 codes.permanent_redirect, # 308 ) @@ -57,7 +81,7 @@ CONTENT_CHUNK_SIZE = 10 * 1024 ITER_CHUNK_SIZE = 512 -class RequestEncodingMixin(object): +class RequestEncodingMixin: @property def path_url(self): """Build the path URL to use.""" @@ -68,16 +92,16 @@ class RequestEncodingMixin(object): path = p.path if not path: - path = '/' + path = "/" url.append(path) query = p.query if query: - url.append('?') + url.append("?") url.append(query) - return ''.join(url) + return "".join(url) @staticmethod def _encode_params(data): @@ -90,18 +114,21 @@ class RequestEncodingMixin(object): if isinstance(data, (str, bytes)): return data - elif hasattr(data, 'read'): + elif hasattr(data, "read"): return data - elif hasattr(data, '__iter__'): + elif hasattr(data, "__iter__"): result = [] for k, vs in to_key_val_list(data): - if isinstance(vs, basestring) or not hasattr(vs, '__iter__'): + if isinstance(vs, basestring) or not hasattr(vs, "__iter__"): vs = [vs] for v in vs: if v is not None: result.append( - (k.encode('utf-8') if isinstance(k, str) else k, - v.encode('utf-8') if isinstance(v, str) else v)) + ( + k.encode("utf-8") if isinstance(k, str) else k, + v.encode("utf-8") if isinstance(v, str) else v, + ) + ) return urlencode(result, doseq=True) else: return data @@ -116,7 +143,7 @@ class RequestEncodingMixin(object): The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, contentype) or 4-tuples (filename, fileobj, contentype, custom_headers). """ - if (not files): + if not files: raise ValueError("Files must be provided.") elif isinstance(data, basestring): raise ValueError("Data must not be a string.") @@ -126,7 +153,7 @@ class RequestEncodingMixin(object): files = to_key_val_list(files or {}) for field, val in fields: - if isinstance(val, basestring) or not hasattr(val, '__iter__'): + if isinstance(val, basestring) or not hasattr(val, "__iter__"): val = [val] for v in val: if v is not None: @@ -135,8 +162,13 @@ class RequestEncodingMixin(object): v = str(v) new_fields.append( - (field.decode('utf-8') if isinstance(field, bytes) else field, - v.encode('utf-8') if isinstance(v, str) else v)) + ( + field.decode("utf-8") + if isinstance(field, bytes) + else field, + v.encode("utf-8") if isinstance(v, str) else v, + ) + ) for (k, v) in files: # support for explicit filename @@ -155,7 +187,7 @@ class RequestEncodingMixin(object): if isinstance(fp, (str, bytes, bytearray)): fdata = fp - elif hasattr(fp, 'read'): + elif hasattr(fp, "read"): fdata = fp.read() elif fp is None: continue @@ -171,16 +203,16 @@ class RequestEncodingMixin(object): return body, content_type -class RequestHooksMixin(object): +class RequestHooksMixin: def register_hook(self, event, hook): """Properly register a hook.""" if event not in self.hooks: - raise ValueError('Unsupported event specified, with event name "%s"' % (event)) + raise ValueError(f'Unsupported event specified, with event name "{event}"') if isinstance(hook, Callable): self.hooks[event].append(hook) - elif hasattr(hook, '__iter__'): + elif hasattr(hook, "__iter__"): self.hooks[event].extend(h for h in hook if isinstance(h, Callable)) def deregister_hook(self, event, hook): @@ -223,9 +255,19 @@ class Request(RequestHooksMixin): <PreparedRequest [GET]> """ - def __init__(self, - method=None, url=None, headers=None, files=None, data=None, - params=None, auth=None, cookies=None, 
hooks=None, json=None): + def __init__( + self, + method=None, + url=None, + headers=None, + files=None, + data=None, + params=None, + auth=None, + cookies=None, + hooks=None, + json=None, + ): # Default empty dicts for dict params. data = [] if data is None else data @@ -249,7 +291,7 @@ class Request(RequestHooksMixin): self.cookies = cookies def __repr__(self): - return '<Request [%s]>' % (self.method) + return f"<Request [{self.method}]>" def prepare(self): """Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it.""" @@ -307,9 +349,19 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin): #: integer denoting starting position of a readable file-like body. self._body_position = None - def prepare(self, - method=None, url=None, headers=None, files=None, data=None, - params=None, auth=None, cookies=None, hooks=None, json=None): + def prepare( + self, + method=None, + url=None, + headers=None, + files=None, + data=None, + params=None, + auth=None, + cookies=None, + hooks=None, + json=None, + ): """Prepares the entire request with the given parameters.""" self.prepare_method(method) @@ -326,7 +378,7 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin): self.prepare_hooks(hooks) def __repr__(self): - return '<PreparedRequest [%s]>' % (self.method) + return f"<PreparedRequest [{self.method}]>" def copy(self): p = PreparedRequest() @@ -350,7 +402,7 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin): from pip._vendor import idna try: - host = idna.encode(host, uts46=True).decode('utf-8') + host = idna.encode(host, uts46=True).decode("utf-8") except idna.IDNAError: raise UnicodeError return host @@ -363,9 +415,9 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin): #: on python 3.x. #: https://github.com/psf/requests/pull/2238 if isinstance(url, bytes): - url = url.decode('utf8') + url = url.decode("utf8") else: - url = unicode(url) if is_py2 else str(url) + url = str(url) # Remove leading whitespaces from url url = url.lstrip() @@ -373,7 +425,7 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin): # Don't do any URL preparation for non-HTTP schemes like `mailto`, # `data` etc to work around exceptions from `url_parse`, which # handles RFC 3986 only. - if ':' in url and not url.lower().startswith('http'): + if ":" in url and not url.lower().startswith("http"): self.url = url return @@ -384,13 +436,13 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin): raise InvalidURL(*e.args) if not scheme: - error = ("Invalid URL {0!r}: No schema supplied. Perhaps you meant http://{0}?") - error = error.format(to_native_string(url, 'utf8')) - - raise MissingSchema(error) + raise MissingSchema( + f"Invalid URL {url!r}: No scheme supplied. " + f"Perhaps you meant https://{url}?" + ) if not host: - raise InvalidURL("Invalid URL %r: No host supplied" % url) + raise InvalidURL(f"Invalid URL {url!r}: No host supplied") # In general, we want to try IDNA encoding the hostname if the string contains # non-ASCII characters. 
This allows users to automatically get the correct IDNA @@ -400,33 +452,21 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin): try: host = self._get_idna_encoded_host(host) except UnicodeError: - raise InvalidURL('URL has an invalid label.') - elif host.startswith(u'*'): - raise InvalidURL('URL has an invalid label.') + raise InvalidURL("URL has an invalid label.") + elif host.startswith(("*", ".")): + raise InvalidURL("URL has an invalid label.") # Carefully reconstruct the network location - netloc = auth or '' + netloc = auth or "" if netloc: - netloc += '@' + netloc += "@" netloc += host if port: - netloc += ':' + str(port) + netloc += f":{port}" # Bare domains aren't valid URLs. if not path: - path = '/' - - if is_py2: - if isinstance(scheme, str): - scheme = scheme.encode('utf-8') - if isinstance(netloc, str): - netloc = netloc.encode('utf-8') - if isinstance(path, str): - path = path.encode('utf-8') - if isinstance(query, str): - query = query.encode('utf-8') - if isinstance(fragment, str): - fragment = fragment.encode('utf-8') + path = "/" if isinstance(params, (str, bytes)): params = to_native_string(params) @@ -434,7 +474,7 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin): enc_params = self._encode_params(params) if enc_params: if query: - query = '%s&%s' % (query, enc_params) + query = f"{query}&{enc_params}" else: query = enc_params @@ -465,15 +505,22 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin): if not data and json is not None: # urllib3 requires a bytes-like body. Python 2's json.dumps # provides this natively, but Python 3 gives a Unicode string. - content_type = 'application/json' - body = complexjson.dumps(json) + content_type = "application/json" + + try: + body = complexjson.dumps(json, allow_nan=False) + except ValueError as ve: + raise InvalidJSONError(ve, request=self) + if not isinstance(body, bytes): - body = body.encode('utf-8') + body = body.encode("utf-8") - is_stream = all([ - hasattr(data, '__iter__'), - not isinstance(data, (basestring, list, tuple, Mapping)) - ]) + is_stream = all( + [ + hasattr(data, "__iter__"), + not isinstance(data, (basestring, list, tuple, Mapping)), + ] + ) if is_stream: try: @@ -483,24 +530,26 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin): body = data - if getattr(body, 'tell', None) is not None: + if getattr(body, "tell", None) is not None: # Record the current file position before reading. # This will allow us to rewind a file in the event # of a redirect. try: self._body_position = body.tell() - except (IOError, OSError): + except OSError: # This differentiates from None, allowing us to catch # a failed `tell()` later when trying to rewind the body self._body_position = object() if files: - raise NotImplementedError('Streamed bodies and files are mutually exclusive.') + raise NotImplementedError( + "Streamed bodies and files are mutually exclusive." + ) if length: - self.headers['Content-Length'] = builtin_str(length) + self.headers["Content-Length"] = builtin_str(length) else: - self.headers['Transfer-Encoding'] = 'chunked' + self.headers["Transfer-Encoding"] = "chunked" else: # Multi-part file uploads. 
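# Example (illustrative sketch, not part of the patch): with allow_nan=False
# in the json branch above, non-finite numbers now fail fast instead of
# producing invalid JSON on the wire. Assumes requests>=2.27; the URL is
# made up:
import requests

req = requests.Request("POST", "https://example.org/api", json={"x": float("inf")})
try:
    req.prepare()
except requests.exceptions.InvalidJSONError as exc:
    print("rejected non-JSON-serializable value:", exc)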
if files: @@ -508,16 +557,16 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin): else: if data: body = self._encode_params(data) - if isinstance(data, basestring) or hasattr(data, 'read'): + if isinstance(data, basestring) or hasattr(data, "read"): content_type = None else: - content_type = 'application/x-www-form-urlencoded' + content_type = "application/x-www-form-urlencoded" self.prepare_content_length(body) # Add content-type if it wasn't explicitly provided. - if content_type and ('content-type' not in self.headers): - self.headers['Content-Type'] = content_type + if content_type and ("content-type" not in self.headers): + self.headers["Content-Type"] = content_type self.body = body @@ -528,13 +577,16 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin): if length: # If length exists, set it. Otherwise, we fallback # to Transfer-Encoding: chunked. - self.headers['Content-Length'] = builtin_str(length) - elif self.method not in ('GET', 'HEAD') and self.headers.get('Content-Length') is None: + self.headers["Content-Length"] = builtin_str(length) + elif ( + self.method not in ("GET", "HEAD") + and self.headers.get("Content-Length") is None + ): # Set Content-Length to 0 for methods that can have a body # but don't provide one. (i.e. not GET or HEAD) - self.headers['Content-Length'] = '0' + self.headers["Content-Length"] = "0" - def prepare_auth(self, auth, url=''): + def prepare_auth(self, auth, url=""): """Prepares the given HTTP auth data.""" # If no Auth is explicitly provided, extract it from the URL first. @@ -574,7 +626,7 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin): cookie_header = get_cookie_header(self._cookies, self) if cookie_header is not None: - self.headers['Cookie'] = cookie_header + self.headers["Cookie"] = cookie_header def prepare_hooks(self, hooks): """Prepares the given hooks.""" @@ -586,14 +638,22 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin): self.register_hook(event, hooks[event]) -class Response(object): +class Response: """The :class:`Response <Response>` object, which contains a server's response to an HTTP request. """ __attrs__ = [ - '_content', 'status_code', 'headers', 'url', 'history', - 'encoding', 'reason', 'cookies', 'elapsed', 'request' + "_content", + "status_code", + "headers", + "url", + "history", + "encoding", + "reason", + "cookies", + "elapsed", + "request", ] def __init__(self): @@ -662,11 +722,11 @@ class Response(object): setattr(self, name, value) # pickled objects do not have .raw - setattr(self, '_content_consumed', True) - setattr(self, 'raw', None) + setattr(self, "_content_consumed", True) + setattr(self, "raw", None) def __repr__(self): - return '<Response [%s]>' % (self.status_code) + return f"<Response [{self.status_code}]>" def __bool__(self): """Returns True if :attr:`status_code` is less than 400. @@ -712,12 +772,15 @@ class Response(object): """True if this Response is a well-formed HTTP redirect that could have been processed automatically (by :meth:`Session.resolve_redirects`). 
""" - return ('location' in self.headers and self.status_code in REDIRECT_STATI) + return "location" in self.headers and self.status_code in REDIRECT_STATI @property def is_permanent_redirect(self): """True if this Response one of the permanent versions of redirect.""" - return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect)) + return "location" in self.headers and self.status_code in ( + codes.moved_permanently, + codes.permanent_redirect, + ) @property def next(self): @@ -726,8 +789,8 @@ class Response(object): @property def apparent_encoding(self): - """The apparent encoding, provided by the chardet library.""" - return chardet.detect(self.content)['encoding'] + """The apparent encoding, provided by the charset_normalizer or chardet libraries.""" + return chardet.detect(self.content)["encoding"] def iter_content(self, chunk_size=1, decode_unicode=False): """Iterates over the response data. When stream=True is set on the @@ -748,16 +811,17 @@ class Response(object): def generate(): # Special case for urllib3. - if hasattr(self.raw, 'stream'): + if hasattr(self.raw, "stream"): try: - for chunk in self.raw.stream(chunk_size, decode_content=True): - yield chunk + yield from self.raw.stream(chunk_size, decode_content=True) except ProtocolError as e: raise ChunkedEncodingError(e) except DecodeError as e: raise ContentDecodingError(e) except ReadTimeoutError as e: raise ConnectionError(e) + except SSLError as e: + raise RequestsSSLError(e) else: # Standard file-like object. while True: @@ -771,7 +835,9 @@ class Response(object): if self._content_consumed and isinstance(self._content, bool): raise StreamConsumedError() elif chunk_size is not None and not isinstance(chunk_size, int): - raise TypeError("chunk_size must be an int, it is instead a %s." % type(chunk_size)) + raise TypeError( + f"chunk_size must be an int, it is instead a {type(chunk_size)}." + ) # simulate reading small chunks of the content reused_chunks = iter_slices(self._content, chunk_size) @@ -784,7 +850,9 @@ class Response(object): return chunks - def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None): + def iter_lines( + self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None + ): """Iterates over the response data, one line at a time. When stream=True is set on the request, this avoids reading the content at once into memory for large responses. @@ -794,7 +862,9 @@ class Response(object): pending = None - for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode): + for chunk in self.iter_content( + chunk_size=chunk_size, decode_unicode=decode_unicode + ): if pending is not None: chunk = pending + chunk @@ -809,8 +879,7 @@ class Response(object): else: pending = None - for line in lines: - yield line + yield from lines if pending is not None: yield pending @@ -822,13 +891,12 @@ class Response(object): if self._content is False: # Read the contents. 
if self._content_consumed: - raise RuntimeError( - 'The content for this response was already consumed') + raise RuntimeError("The content for this response was already consumed") if self.status_code == 0 or self.raw is None: self._content = None else: - self._content = b''.join(self.iter_content(CONTENT_CHUNK_SIZE)) or b'' + self._content = b"".join(self.iter_content(CONTENT_CHUNK_SIZE)) or b"" self._content_consumed = True # don't need to release the connection; that's been handled by urllib3 @@ -840,7 +908,7 @@ class Response(object): """Content of the response, in unicode. If Response.encoding is None, encoding will be guessed using - ``chardet``. + ``charset_normalizer`` or ``chardet``. The encoding of the response content is determined based solely on HTTP headers, following RFC 2616 to the letter. If you can take advantage of @@ -853,7 +921,7 @@ class Response(object): encoding = self.encoding if not self.content: - return str('') + return "" # Fallback to auto-detected encoding. if self.encoding is None: @@ -861,7 +929,7 @@ class Response(object): # Decode unicode from given encoding. try: - content = str(self.content, encoding, errors='replace') + content = str(self.content, encoding, errors="replace") except (LookupError, TypeError): # A LookupError is raised if the encoding was not found which could # indicate a misspelling or similar mistake. @@ -869,7 +937,7 @@ class Response(object): # A TypeError can be raised if encoding is None # # So we try blindly encoding. - content = str(self.content, errors='replace') + content = str(self.content, errors="replace") return content @@ -877,67 +945,77 @@ class Response(object): r"""Returns the json-encoded content of a response, if any. :param \*\*kwargs: Optional arguments that ``json.loads`` takes. - :raises ValueError: If the response body does not contain valid json. + :raises requests.exceptions.JSONDecodeError: If the response body does not + contain valid json. """ if not self.encoding and self.content and len(self.content) > 3: # No encoding set. JSON RFC 4627 section 3 states we should expect # UTF-8, -16 or -32. Detect which one to use; If the detection or - # decoding fails, fall back to `self.text` (using chardet to make + # decoding fails, fall back to `self.text` (using charset_normalizer to make # a best guess). encoding = guess_json_utf(self.content) if encoding is not None: try: - return complexjson.loads( - self.content.decode(encoding), **kwargs - ) + return complexjson.loads(self.content.decode(encoding), **kwargs) except UnicodeDecodeError: # Wrong UTF codec detected; usually because it's not UTF-8 # but some other 8-bit codec. This is an RFC violation, # and the server didn't bother to tell us what codec *was* # used. 
pass - return complexjson.loads(self.text, **kwargs) + except JSONDecodeError as e: + raise RequestsJSONDecodeError(e.msg, e.doc, e.pos) + + try: + return complexjson.loads(self.text, **kwargs) + except JSONDecodeError as e: + # Catch JSON-related errors and raise as requests.JSONDecodeError + # This aliases json.JSONDecodeError and simplejson.JSONDecodeError + raise RequestsJSONDecodeError(e.msg, e.doc, e.pos) @property def links(self): """Returns the parsed header links of the response, if any.""" - header = self.headers.get('link') + header = self.headers.get("link") - # l = MultiDict() - l = {} + resolved_links = {} if header: links = parse_header_links(header) for link in links: - key = link.get('rel') or link.get('url') - l[key] = link + key = link.get("rel") or link.get("url") + resolved_links[key] = link - return l + return resolved_links def raise_for_status(self): """Raises :class:`HTTPError`, if one occurred.""" - http_error_msg = '' + http_error_msg = "" if isinstance(self.reason, bytes): # We attempt to decode utf-8 first because some servers # choose to localize their reason strings. If the string # isn't utf-8, we fall back to iso-8859-1 for all other # encodings. (See PR #3538) try: - reason = self.reason.decode('utf-8') + reason = self.reason.decode("utf-8") except UnicodeDecodeError: - reason = self.reason.decode('iso-8859-1') + reason = self.reason.decode("iso-8859-1") else: reason = self.reason if 400 <= self.status_code < 500: - http_error_msg = u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url) + http_error_msg = ( + f"{self.status_code} Client Error: {reason} for url: {self.url}" + ) elif 500 <= self.status_code < 600: - http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url) + http_error_msg = ( + f"{self.status_code} Server Error: {reason} for url: {self.url}" + ) if http_error_msg: raise HTTPError(http_error_msg, response=self) @@ -951,6 +1029,6 @@ class Response(object): if not self._content_consumed: self.raw.close() - release_conn = getattr(self.raw, 'release_conn', None) + release_conn = getattr(self.raw, "release_conn", None) if release_conn is not None: release_conn() diff --git a/env/Lib/site-packages/pip/_vendor/requests/sessions.py b/env/Lib/site-packages/pip/_vendor/requests/sessions.py index 45ab8a5d3f698938c3d2c35cf412a0e601b9a4f1..dbcf2a7b0ee2898b72714b756e4b27fbbad4beab 100644 --- a/env/Lib/site-packages/pip/_vendor/requests/sessions.py +++ b/env/Lib/site-packages/pip/_vendor/requests/sessions.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - """ requests.sessions ~~~~~~~~~~~~~~~~~ @@ -10,39 +8,52 @@ requests (cookies, auth, proxies). 
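# Example (illustrative sketch, not part of the patch): after the models.py
# json() hunk above, malformed bodies raise requests' own JSONDecodeError,
# which still subclasses ValueError, so existing handlers keep working.
# Assumes requests>=2.27 and a server that returns HTML:
import requests

r = requests.get("https://example.org/")  # HTML body, not JSON
try:
    r.json()
except requests.exceptions.JSONDecodeError as exc:
    print("not JSON:", exc.msg, "at position", exc.pos)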
import os import sys import time -from datetime import timedelta from collections import OrderedDict +from datetime import timedelta +from ._internal_utils import to_native_string +from .adapters import HTTPAdapter from .auth import _basic_auth_str -from .compat import cookielib, is_py3, urljoin, urlparse, Mapping +from .compat import Mapping, cookielib, urljoin, urlparse from .cookies import ( - cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies) -from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT -from .hooks import default_hooks, dispatch_hook -from ._internal_utils import to_native_string -from .utils import to_key_val_list, default_headers, DEFAULT_PORTS + RequestsCookieJar, + cookiejar_from_dict, + extract_cookies_to_jar, + merge_cookies, +) from .exceptions import ( - TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError) - -from .structures import CaseInsensitiveDict -from .adapters import HTTPAdapter - -from .utils import ( - requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies, - get_auth_from_url, rewind_body + ChunkedEncodingError, + ContentDecodingError, + InvalidSchema, + TooManyRedirects, ) - -from .status_codes import codes +from .hooks import default_hooks, dispatch_hook # formerly defined here, reexposed here for backward compatibility -from .models import REDIRECT_STATI +from .models import ( # noqa: F401 + DEFAULT_REDIRECT_LIMIT, + REDIRECT_STATI, + PreparedRequest, + Request, +) +from .status_codes import codes +from .structures import CaseInsensitiveDict +from .utils import ( # noqa: F401 + DEFAULT_PORTS, + default_headers, + get_auth_from_url, + get_environ_proxies, + get_netrc_auth, + requote_uri, + resolve_proxies, + rewind_body, + should_bypass_proxies, + to_key_val_list, +) # Preferred clock, based on which one is more accurate on a given system. -if sys.platform == 'win32': - try: # Python 3.4+ - preferred_clock = time.perf_counter - except AttributeError: # Earlier than Python 3. - preferred_clock = time.clock +if sys.platform == "win32": + preferred_clock = time.perf_counter else: preferred_clock = time.time @@ -61,8 +72,7 @@ def merge_setting(request_setting, session_setting, dict_class=OrderedDict): # Bypass if not a dictionary (e.g. verify) if not ( - isinstance(session_setting, Mapping) and - isinstance(request_setting, Mapping) + isinstance(session_setting, Mapping) and isinstance(request_setting, Mapping) ): return request_setting @@ -84,17 +94,16 @@ def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict): This is necessary because when request_hooks == {'response': []}, the merge breaks Session hooks entirely. """ - if session_hooks is None or session_hooks.get('response') == []: + if session_hooks is None or session_hooks.get("response") == []: return request_hooks - if request_hooks is None or request_hooks.get('response') == []: + if request_hooks is None or request_hooks.get("response") == []: return session_hooks return merge_setting(request_hooks, session_hooks, dict_class) -class SessionRedirectMixin(object): - +class SessionRedirectMixin: def get_redirect_target(self, resp): """Receives a Response. Returns a redirect URI or ``None``""" # Due to the nature of how requests processes redirects this method will @@ -104,16 +113,15 @@ class SessionRedirectMixin(object): # to cache the redirect location onto the response object as a private # attribute. 
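# Example (illustrative sketch, not part of the patch): merge_setting above
# gives the request-level mapping priority and drops keys explicitly set to
# None, which is how a session default header is suppressed per request:
from pip._vendor.requests.sessions import merge_setting
from pip._vendor.requests.structures import CaseInsensitiveDict

session_headers = CaseInsensitiveDict({"Accept": "*/*", "X-Trace": "abc"})
merged = merge_setting({"X-Trace": None}, session_headers, dict_class=CaseInsensitiveDict)
print(dict(merged.lower_items()))  # {'accept': '*/*'} - X-Trace removed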
if resp.is_redirect: - location = resp.headers['location'] + location = resp.headers["location"] # Currently the underlying http module on py3 decode headers # in latin1, but empirical evidence suggests that latin1 is very # rarely used with non-ASCII characters in HTTP headers. # It is more likely to get UTF8 header rather than latin1. # This causes incorrect handling of UTF8 encoded location headers. # To solve this, we re-encode the location in latin1. - if is_py3: - location = location.encode('latin1') - return to_native_string(location, 'utf8') + location = location.encode("latin1") + return to_native_string(location, "utf8") return None def should_strip_auth(self, old_url, new_url): @@ -126,23 +134,40 @@ class SessionRedirectMixin(object): # ports. This isn't specified by RFC 7235, but is kept to avoid # breaking backwards compatibility with older versions of requests # that allowed any redirects on the same host. - if (old_parsed.scheme == 'http' and old_parsed.port in (80, None) - and new_parsed.scheme == 'https' and new_parsed.port in (443, None)): + if ( + old_parsed.scheme == "http" + and old_parsed.port in (80, None) + and new_parsed.scheme == "https" + and new_parsed.port in (443, None) + ): return False # Handle default port usage corresponding to scheme. changed_port = old_parsed.port != new_parsed.port changed_scheme = old_parsed.scheme != new_parsed.scheme default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None) - if (not changed_scheme and old_parsed.port in default_port - and new_parsed.port in default_port): + if ( + not changed_scheme + and old_parsed.port in default_port + and new_parsed.port in default_port + ): return False # Standard case: root URI must match return changed_port or changed_scheme - def resolve_redirects(self, resp, req, stream=False, timeout=None, - verify=True, cert=None, proxies=None, yield_requests=False, **adapter_kwargs): + def resolve_redirects( + self, + resp, + req, + stream=False, + timeout=None, + verify=True, + cert=None, + proxies=None, + yield_requests=False, + **adapter_kwargs, + ): """Receives a Response. Returns a generator of Responses or Requests.""" hist = [] # keep track of history @@ -163,19 +188,21 @@ class SessionRedirectMixin(object): resp.raw.read(decode_content=False) if len(resp.history) >= self.max_redirects: - raise TooManyRedirects('Exceeded {} redirects.'.format(self.max_redirects), response=resp) + raise TooManyRedirects( + f"Exceeded {self.max_redirects} redirects.", response=resp + ) # Release the connection back into the pool. 
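# Example (illustrative sketch, not part of the patch): the rules in
# should_strip_auth above keep credentials on a same-host http->https
# upgrade but strip them when the host changes. Hostnames are made up:
from pip._vendor.requests import Session

s = Session()
print(s.should_strip_auth("http://example.com/a", "https://example.com/b"))   # False
print(s.should_strip_auth("https://example.com/a", "https://other.example/"))  # True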
resp.close() # Handle redirection without scheme (see: RFC 1808 Section 4) - if url.startswith('//'): + if url.startswith("//"): parsed_rurl = urlparse(resp.url) - url = ':'.join([to_native_string(parsed_rurl.scheme), url]) + url = ":".join([to_native_string(parsed_rurl.scheme), url]) # Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2) parsed = urlparse(url) - if parsed.fragment == '' and previous_fragment: + if parsed.fragment == "" and previous_fragment: parsed = parsed._replace(fragment=previous_fragment) elif parsed.fragment: previous_fragment = parsed.fragment @@ -194,15 +221,18 @@ class SessionRedirectMixin(object): self.rebuild_method(prepared_request, resp) # https://github.com/psf/requests/issues/1084 - if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect): + if resp.status_code not in ( + codes.temporary_redirect, + codes.permanent_redirect, + ): # https://github.com/psf/requests/issues/3490 - purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding') + purged_headers = ("Content-Length", "Content-Type", "Transfer-Encoding") for header in purged_headers: prepared_request.headers.pop(header, None) prepared_request.body = None headers = prepared_request.headers - headers.pop('Cookie', None) + headers.pop("Cookie", None) # Extract any cookies sent on the response to the cookiejar # in the new request. Because we've mutated our copied prepared @@ -218,9 +248,8 @@ class SessionRedirectMixin(object): # A failed tell() sets `_body_position` to `object()`. This non-None # value ensures `rewindable` will be True, allowing us to raise an # UnrewindableBodyError, instead of hanging the connection. - rewindable = ( - prepared_request._body_position is not None and - ('Content-Length' in headers or 'Transfer-Encoding' in headers) + rewindable = prepared_request._body_position is not None and ( + "Content-Length" in headers or "Transfer-Encoding" in headers ) # Attempt to rewind consumed file-like object. @@ -242,7 +271,7 @@ class SessionRedirectMixin(object): cert=cert, proxies=proxies, allow_redirects=False, - **adapter_kwargs + **adapter_kwargs, ) extract_cookies_to_jar(self.cookies, prepared_request, resp.raw) @@ -259,17 +288,18 @@ class SessionRedirectMixin(object): headers = prepared_request.headers url = prepared_request.url - if 'Authorization' in headers and self.should_strip_auth(response.request.url, url): + if "Authorization" in headers and self.should_strip_auth( + response.request.url, url + ): # If we get redirected to a new host, we should strip out any # authentication headers. - del headers['Authorization'] + del headers["Authorization"] # .netrc might have more auth for us on our new host. new_auth = get_netrc_auth(url) if self.trust_env else None if new_auth is not None: prepared_request.prepare_auth(new_auth) - def rebuild_proxies(self, prepared_request, proxies): """This method re-evaluates the proxy configuration by considering the environment variables. 
If we are redirected to a URL covered by @@ -282,32 +312,22 @@ class SessionRedirectMixin(object): :rtype: dict """ - proxies = proxies if proxies is not None else {} headers = prepared_request.headers - url = prepared_request.url - scheme = urlparse(url).scheme - new_proxies = proxies.copy() - no_proxy = proxies.get('no_proxy') + scheme = urlparse(prepared_request.url).scheme + new_proxies = resolve_proxies(prepared_request, proxies, self.trust_env) - bypass_proxy = should_bypass_proxies(url, no_proxy=no_proxy) - if self.trust_env and not bypass_proxy: - environ_proxies = get_environ_proxies(url, no_proxy=no_proxy) - - proxy = environ_proxies.get(scheme, environ_proxies.get('all')) - - if proxy: - new_proxies.setdefault(scheme, proxy) - - if 'Proxy-Authorization' in headers: - del headers['Proxy-Authorization'] + if "Proxy-Authorization" in headers: + del headers["Proxy-Authorization"] try: username, password = get_auth_from_url(new_proxies[scheme]) except KeyError: username, password = None, None - if username and password: - headers['Proxy-Authorization'] = _basic_auth_str(username, password) + # urllib3 handles proxy authorization for us in the standard adapter. + # Avoid appending this to TLS tunneled requests where it may be leaked. + if not scheme.startswith("https") and username and password: + headers["Proxy-Authorization"] = _basic_auth_str(username, password) return new_proxies @@ -318,18 +338,18 @@ class SessionRedirectMixin(object): method = prepared_request.method # https://tools.ietf.org/html/rfc7231#section-6.4.4 - if response.status_code == codes.see_other and method != 'HEAD': - method = 'GET' + if response.status_code == codes.see_other and method != "HEAD": + method = "GET" # Do what the browsers do, despite standards... # First, turn 302s into GETs. - if response.status_code == codes.found and method != 'HEAD': - method = 'GET' + if response.status_code == codes.found and method != "HEAD": + method = "GET" # Second, if a POST is responded to with a 301, turn it into a GET. # This bizarre behaviour is explained in Issue 1704. - if response.status_code == codes.moved and method == 'POST': - method = 'GET' + if response.status_code == codes.moved and method == "POST": + method = "GET" prepared_request.method = method @@ -354,9 +374,18 @@ class Session(SessionRedirectMixin): """ __attrs__ = [ - 'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify', - 'cert', 'adapters', 'stream', 'trust_env', - 'max_redirects', + "headers", + "cookies", + "auth", + "proxies", + "hooks", + "params", + "verify", + "cert", + "adapters", + "stream", + "trust_env", + "max_redirects", ] def __init__(self): @@ -418,8 +447,8 @@ class Session(SessionRedirectMixin): # Default connection adapters. self.adapters = OrderedDict() - self.mount('https://', HTTPAdapter()) - self.mount('http://', HTTPAdapter()) + self.mount("https://", HTTPAdapter()) + self.mount("http://", HTTPAdapter()) def __enter__(self): return self @@ -445,7 +474,8 @@ class Session(SessionRedirectMixin): # Merge with session cookies merged_cookies = merge_cookies( - merge_cookies(RequestsCookieJar(), self.cookies), cookies) + merge_cookies(RequestsCookieJar(), self.cookies), cookies + ) # Set environment's basic authentication if not explicitly set. 
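# Example (illustrative sketch, not part of the patch): __init__ above mounts
# a default HTTPAdapter per scheme; a longer prefix mounted later wins the
# get_adapter lookup. Host and retry count are made up:
import requests
from requests.adapters import HTTPAdapter

s = requests.Session()
adapter = HTTPAdapter(max_retries=3)
s.mount("https://internal.example/", adapter)
assert s.get_adapter("https://internal.example/api") is adapter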
auth = request.auth @@ -459,7 +489,9 @@ class Session(SessionRedirectMixin): files=request.files, data=request.data, json=request.json, - headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict), + headers=merge_setting( + request.headers, self.headers, dict_class=CaseInsensitiveDict + ), params=merge_setting(request.params, self.params), auth=merge_setting(auth, self.auth), cookies=merged_cookies, @@ -467,10 +499,25 @@ class Session(SessionRedirectMixin): ) return p - def request(self, method, url, - params=None, data=None, headers=None, cookies=None, files=None, - auth=None, timeout=None, allow_redirects=True, proxies=None, - hooks=None, stream=None, verify=None, cert=None, json=None): + def request( + self, + method, + url, + params=None, + data=None, + headers=None, + cookies=None, + files=None, + auth=None, + timeout=None, + allow_redirects=True, + proxies=None, + hooks=None, + stream=None, + verify=None, + cert=None, + json=None, + ): """Constructs a :class:`Request <Request>`, prepares it and sends it. Returns :class:`Response <Response>` object. @@ -506,7 +553,7 @@ class Session(SessionRedirectMixin): ``False``, requests will accept any TLS certificate presented by the server, and will ignore hostname mismatches and/or expired certificates, which will make your application vulnerable to - man-in-the-middle (MitM) attacks. Setting verify to ``False`` + man-in-the-middle (MitM) attacks. Setting verify to ``False`` may be useful during local development or testing. :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair. @@ -535,8 +582,8 @@ class Session(SessionRedirectMixin): # Send the request. send_kwargs = { - 'timeout': timeout, - 'allow_redirects': allow_redirects, + "timeout": timeout, + "allow_redirects": allow_redirects, } send_kwargs.update(settings) resp = self.send(prep, **send_kwargs) @@ -551,8 +598,8 @@ class Session(SessionRedirectMixin): :rtype: requests.Response """ - kwargs.setdefault('allow_redirects', True) - return self.request('GET', url, **kwargs) + kwargs.setdefault("allow_redirects", True) + return self.request("GET", url, **kwargs) def options(self, url, **kwargs): r"""Sends a OPTIONS request. Returns :class:`Response` object. @@ -562,8 +609,8 @@ class Session(SessionRedirectMixin): :rtype: requests.Response """ - kwargs.setdefault('allow_redirects', True) - return self.request('OPTIONS', url, **kwargs) + kwargs.setdefault("allow_redirects", True) + return self.request("OPTIONS", url, **kwargs) def head(self, url, **kwargs): r"""Sends a HEAD request. Returns :class:`Response` object. @@ -573,8 +620,8 @@ class Session(SessionRedirectMixin): :rtype: requests.Response """ - kwargs.setdefault('allow_redirects', False) - return self.request('HEAD', url, **kwargs) + kwargs.setdefault("allow_redirects", False) + return self.request("HEAD", url, **kwargs) def post(self, url, data=None, json=None, **kwargs): r"""Sends a POST request. Returns :class:`Response` object. @@ -587,7 +634,7 @@ class Session(SessionRedirectMixin): :rtype: requests.Response """ - return self.request('POST', url, data=data, json=json, **kwargs) + return self.request("POST", url, data=data, json=json, **kwargs) def put(self, url, data=None, **kwargs): r"""Sends a PUT request. Returns :class:`Response` object. 
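# Example (illustrative sketch, not part of the patch): every verb helper
# above funnels into Session.request, where per-call values are merged over
# the session defaults assembled in prepare_request. URL and token are
# made up:
import requests

s = requests.Session()
s.headers["User-Agent"] = "example-agent/1.0"
s.params = {"token": "dummy"}
r = s.get("https://example.org/search", params={"page": 2}, timeout=10)
print(r.request.url)  # carries both token=dummy and page=2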
@@ -599,7 +646,7 @@ class Session(SessionRedirectMixin): :rtype: requests.Response """ - return self.request('PUT', url, data=data, **kwargs) + return self.request("PUT", url, data=data, **kwargs) def patch(self, url, data=None, **kwargs): r"""Sends a PATCH request. Returns :class:`Response` object. @@ -611,7 +658,7 @@ class Session(SessionRedirectMixin): :rtype: requests.Response """ - return self.request('PATCH', url, data=data, **kwargs) + return self.request("PATCH", url, data=data, **kwargs) def delete(self, url, **kwargs): r"""Sends a DELETE request. Returns :class:`Response` object. @@ -621,7 +668,7 @@ class Session(SessionRedirectMixin): :rtype: requests.Response """ - return self.request('DELETE', url, **kwargs) + return self.request("DELETE", url, **kwargs) def send(self, request, **kwargs): """Send a given PreparedRequest. @@ -630,19 +677,20 @@ class Session(SessionRedirectMixin): """ # Set defaults that the hooks can utilize to ensure they always have # the correct parameters to reproduce the previous request. - kwargs.setdefault('stream', self.stream) - kwargs.setdefault('verify', self.verify) - kwargs.setdefault('cert', self.cert) - kwargs.setdefault('proxies', self.proxies) + kwargs.setdefault("stream", self.stream) + kwargs.setdefault("verify", self.verify) + kwargs.setdefault("cert", self.cert) + if "proxies" not in kwargs: + kwargs["proxies"] = resolve_proxies(request, self.proxies, self.trust_env) # It's possible that users might accidentally send a Request object. # Guard against that specific failure case. if isinstance(request, Request): - raise ValueError('You can only send PreparedRequests.') + raise ValueError("You can only send PreparedRequests.") # Set up variables needed for resolve_redirects and dispatching of hooks - allow_redirects = kwargs.pop('allow_redirects', True) - stream = kwargs.get('stream') + allow_redirects = kwargs.pop("allow_redirects", True) + stream = kwargs.get("stream") hooks = request.hooks # Get the appropriate adapter to use @@ -659,7 +707,7 @@ class Session(SessionRedirectMixin): r.elapsed = timedelta(seconds=elapsed) # Response manipulation hooks - r = dispatch_hook('response', hooks, r, **kwargs) + r = dispatch_hook("response", hooks, r, **kwargs) # Persist cookies if r.history: @@ -689,7 +737,9 @@ class Session(SessionRedirectMixin): # If redirects aren't being followed, store the response on the Request for Response.next(). if not allow_redirects: try: - r._next = next(self.resolve_redirects(r, request, yield_requests=True, **kwargs)) + r._next = next( + self.resolve_redirects(r, request, yield_requests=True, **kwargs) + ) except StopIteration: pass @@ -707,16 +757,19 @@ class Session(SessionRedirectMixin): # Gather clues from the surrounding environment. if self.trust_env: # Set environment's proxies. - no_proxy = proxies.get('no_proxy') if proxies is not None else None + no_proxy = proxies.get("no_proxy") if proxies is not None else None env_proxies = get_environ_proxies(url, no_proxy=no_proxy) for (k, v) in env_proxies.items(): proxies.setdefault(k, v) - # Look for requests environment configuration and be compatible - # with cURL. + # Look for requests environment configuration + # and be compatible with cURL. if verify is True or verify is None: - verify = (os.environ.get('REQUESTS_CA_BUNDLE') or - os.environ.get('CURL_CA_BUNDLE')) + verify = ( + os.environ.get("REQUESTS_CA_BUNDLE") + or os.environ.get("CURL_CA_BUNDLE") + or verify + ) # Merge all the kwargs. 
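# Example (illustrative sketch, not part of the patch): with trust_env
# enabled, merge_environment_settings above can upgrade verify=True to a CA
# bundle path taken from REQUESTS_CA_BUNDLE or CURL_CA_BUNDLE. The bundle
# path is made up:
import os
import requests

os.environ["REQUESTS_CA_BUNDLE"] = "/tmp/corporate-ca.pem"
s = requests.Session()
settings = s.merge_environment_settings(
    "https://example.org/", proxies={}, stream=None, verify=True, cert=None
)
print(settings["verify"])  # /tmp/corporate-ca.pem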
proxies = merge_setting(proxies, self.proxies) @@ -724,8 +777,7 @@ class Session(SessionRedirectMixin): verify = merge_setting(verify, self.verify) cert = merge_setting(cert, self.cert) - return {'verify': verify, 'proxies': proxies, 'stream': stream, - 'cert': cert} + return {"proxies": proxies, "stream": stream, "verify": verify, "cert": cert} def get_adapter(self, url): """ @@ -739,7 +791,7 @@ class Session(SessionRedirectMixin): return adapter # Nothing matches :-/ - raise InvalidSchema("No connection adapters were found for {!r}".format(url)) + raise InvalidSchema(f"No connection adapters were found for {url!r}") def close(self): """Closes all adapters and as such the session""" diff --git a/env/Lib/site-packages/pip/_vendor/requests/status_codes.py b/env/Lib/site-packages/pip/_vendor/requests/status_codes.py index d80a7cd4dd486d2927a3c0f2f3e684bcf5f8e49d..4bd072be9769748a852740d037d5c63021472c9d 100644 --- a/env/Lib/site-packages/pip/_vendor/requests/status_codes.py +++ b/env/Lib/site-packages/pip/_vendor/requests/status_codes.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - r""" The ``codes`` object defines a mapping from common names for HTTP statuses to their numerical codes, accessible either as attributes or as dictionary @@ -23,101 +21,108 @@ the names are allowed. For example, ``codes.ok``, ``codes.OK``, and from .structures import LookupDict _codes = { - # Informational. - 100: ('continue',), - 101: ('switching_protocols',), - 102: ('processing',), - 103: ('checkpoint',), - 122: ('uri_too_long', 'request_uri_too_long'), - 200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'), - 201: ('created',), - 202: ('accepted',), - 203: ('non_authoritative_info', 'non_authoritative_information'), - 204: ('no_content',), - 205: ('reset_content', 'reset'), - 206: ('partial_content', 'partial'), - 207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'), - 208: ('already_reported',), - 226: ('im_used',), - + 100: ("continue",), + 101: ("switching_protocols",), + 102: ("processing",), + 103: ("checkpoint",), + 122: ("uri_too_long", "request_uri_too_long"), + 200: ("ok", "okay", "all_ok", "all_okay", "all_good", "\\o/", "✓"), + 201: ("created",), + 202: ("accepted",), + 203: ("non_authoritative_info", "non_authoritative_information"), + 204: ("no_content",), + 205: ("reset_content", "reset"), + 206: ("partial_content", "partial"), + 207: ("multi_status", "multiple_status", "multi_stati", "multiple_stati"), + 208: ("already_reported",), + 226: ("im_used",), # Redirection. - 300: ('multiple_choices',), - 301: ('moved_permanently', 'moved', '\\o-'), - 302: ('found',), - 303: ('see_other', 'other'), - 304: ('not_modified',), - 305: ('use_proxy',), - 306: ('switch_proxy',), - 307: ('temporary_redirect', 'temporary_moved', 'temporary'), - 308: ('permanent_redirect', - 'resume_incomplete', 'resume',), # These 2 to be removed in 3.0 - + 300: ("multiple_choices",), + 301: ("moved_permanently", "moved", "\\o-"), + 302: ("found",), + 303: ("see_other", "other"), + 304: ("not_modified",), + 305: ("use_proxy",), + 306: ("switch_proxy",), + 307: ("temporary_redirect", "temporary_moved", "temporary"), + 308: ( + "permanent_redirect", + "resume_incomplete", + "resume", + ), # "resume" and "resume_incomplete" to be removed in 3.0 # Client Error. 
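# Example (illustrative sketch, not part of the patch): every alias in
# _codes maps to the same integer, in lower and upper case, via the _init()
# hook further down:
from pip._vendor.requests import codes

print(codes.ok, codes.OK, codes["ok"])  # 200 200 200
print(codes.teapot)                     # 418
print(codes.too_many)                   # 429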
- 400: ('bad_request', 'bad'), - 401: ('unauthorized',), - 402: ('payment_required', 'payment'), - 403: ('forbidden',), - 404: ('not_found', '-o-'), - 405: ('method_not_allowed', 'not_allowed'), - 406: ('not_acceptable',), - 407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'), - 408: ('request_timeout', 'timeout'), - 409: ('conflict',), - 410: ('gone',), - 411: ('length_required',), - 412: ('precondition_failed', 'precondition'), - 413: ('request_entity_too_large',), - 414: ('request_uri_too_large',), - 415: ('unsupported_media_type', 'unsupported_media', 'media_type'), - 416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'), - 417: ('expectation_failed',), - 418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'), - 421: ('misdirected_request',), - 422: ('unprocessable_entity', 'unprocessable'), - 423: ('locked',), - 424: ('failed_dependency', 'dependency'), - 425: ('unordered_collection', 'unordered'), - 426: ('upgrade_required', 'upgrade'), - 428: ('precondition_required', 'precondition'), - 429: ('too_many_requests', 'too_many'), - 431: ('header_fields_too_large', 'fields_too_large'), - 444: ('no_response', 'none'), - 449: ('retry_with', 'retry'), - 450: ('blocked_by_windows_parental_controls', 'parental_controls'), - 451: ('unavailable_for_legal_reasons', 'legal_reasons'), - 499: ('client_closed_request',), - + 400: ("bad_request", "bad"), + 401: ("unauthorized",), + 402: ("payment_required", "payment"), + 403: ("forbidden",), + 404: ("not_found", "-o-"), + 405: ("method_not_allowed", "not_allowed"), + 406: ("not_acceptable",), + 407: ("proxy_authentication_required", "proxy_auth", "proxy_authentication"), + 408: ("request_timeout", "timeout"), + 409: ("conflict",), + 410: ("gone",), + 411: ("length_required",), + 412: ("precondition_failed", "precondition"), + 413: ("request_entity_too_large",), + 414: ("request_uri_too_large",), + 415: ("unsupported_media_type", "unsupported_media", "media_type"), + 416: ( + "requested_range_not_satisfiable", + "requested_range", + "range_not_satisfiable", + ), + 417: ("expectation_failed",), + 418: ("im_a_teapot", "teapot", "i_am_a_teapot"), + 421: ("misdirected_request",), + 422: ("unprocessable_entity", "unprocessable"), + 423: ("locked",), + 424: ("failed_dependency", "dependency"), + 425: ("unordered_collection", "unordered"), + 426: ("upgrade_required", "upgrade"), + 428: ("precondition_required", "precondition"), + 429: ("too_many_requests", "too_many"), + 431: ("header_fields_too_large", "fields_too_large"), + 444: ("no_response", "none"), + 449: ("retry_with", "retry"), + 450: ("blocked_by_windows_parental_controls", "parental_controls"), + 451: ("unavailable_for_legal_reasons", "legal_reasons"), + 499: ("client_closed_request",), # Server Error. 
- 500: ('internal_server_error', 'server_error', '/o\\', '✗'), - 501: ('not_implemented',), - 502: ('bad_gateway',), - 503: ('service_unavailable', 'unavailable'), - 504: ('gateway_timeout',), - 505: ('http_version_not_supported', 'http_version'), - 506: ('variant_also_negotiates',), - 507: ('insufficient_storage',), - 509: ('bandwidth_limit_exceeded', 'bandwidth'), - 510: ('not_extended',), - 511: ('network_authentication_required', 'network_auth', 'network_authentication'), + 500: ("internal_server_error", "server_error", "/o\\", "✗"), + 501: ("not_implemented",), + 502: ("bad_gateway",), + 503: ("service_unavailable", "unavailable"), + 504: ("gateway_timeout",), + 505: ("http_version_not_supported", "http_version"), + 506: ("variant_also_negotiates",), + 507: ("insufficient_storage",), + 509: ("bandwidth_limit_exceeded", "bandwidth"), + 510: ("not_extended",), + 511: ("network_authentication_required", "network_auth", "network_authentication"), } -codes = LookupDict(name='status_codes') +codes = LookupDict(name="status_codes") + def _init(): for code, titles in _codes.items(): for title in titles: setattr(codes, title, code) - if not title.startswith(('\\', '/')): + if not title.startswith(("\\", "/")): setattr(codes, title.upper(), code) def doc(code): - names = ', '.join('``%s``' % n for n in _codes[code]) - return '* %d: %s' % (code, names) + names = ", ".join(f"``{n}``" for n in _codes[code]) + return "* %d: %s" % (code, names) global __doc__ - __doc__ = (__doc__ + '\n' + - '\n'.join(doc(code) for code in sorted(_codes)) - if __doc__ is not None else None) + __doc__ = ( + __doc__ + "\n" + "\n".join(doc(code) for code in sorted(_codes)) + if __doc__ is not None + else None + ) + _init() diff --git a/env/Lib/site-packages/pip/_vendor/requests/structures.py b/env/Lib/site-packages/pip/_vendor/requests/structures.py index 8ee0ba7a082a04bfb91a6e1c7d80c5d51c0a2573..188e13e4829591facb23ae0e2eda84b9807cb818 100644 --- a/env/Lib/site-packages/pip/_vendor/requests/structures.py +++ b/env/Lib/site-packages/pip/_vendor/requests/structures.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - """ requests.structures ~~~~~~~~~~~~~~~~~~~ @@ -64,11 +62,7 @@ class CaseInsensitiveDict(MutableMapping): def lower_items(self): """Like iteritems(), but with all lowercase keys.""" - return ( - (lowerkey, keyval[1]) - for (lowerkey, keyval) - in self._store.items() - ) + return ((lowerkey, keyval[1]) for (lowerkey, keyval) in self._store.items()) def __eq__(self, other): if isinstance(other, Mapping): @@ -91,10 +85,10 @@ class LookupDict(dict): def __init__(self, name=None): self.name = name - super(LookupDict, self).__init__() + super().__init__() def __repr__(self): - return '<lookup \'%s\'>' % (self.name) + return f"<lookup '{self.name}'>" def __getitem__(self, key): # We allow fall-through here, so values default to None diff --git a/env/Lib/site-packages/pip/_vendor/requests/utils.py b/env/Lib/site-packages/pip/_vendor/requests/utils.py index db67938e67e4766b403f23b811c1ecb8bc9057bb..36607eda2ec5b8bdc4fe87e256cc8f3b1a79f707 100644 --- a/env/Lib/site-packages/pip/_vendor/requests/utils.py +++ b/env/Lib/site-packages/pip/_vendor/requests/utils.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - """ requests.utils ~~~~~~~~~~~~~~ @@ -21,49 +19,76 @@ import warnings import zipfile from collections import OrderedDict -from .__version__ import __version__ +from pip._vendor.urllib3.util import make_headers, parse_url + from . 
import certs +from .__version__ import __version__ + # to_native_string is unused here, but imported here for backwards compatibility -from ._internal_utils import to_native_string +from ._internal_utils import ( # noqa: F401 + _HEADER_VALIDATORS_BYTE, + _HEADER_VALIDATORS_STR, + HEADER_VALIDATORS, + to_native_string, +) +from .compat import ( + Mapping, + basestring, + bytes, + getproxies, + getproxies_environment, + integer_types, +) from .compat import parse_http_list as _parse_list_header from .compat import ( - quote, urlparse, bytes, str, unquote, getproxies, - proxy_bypass, urlunparse, basestring, integer_types, is_py3, - proxy_bypass_environment, getproxies_environment, Mapping) + proxy_bypass, + proxy_bypass_environment, + quote, + str, + unquote, + urlparse, + urlunparse, +) from .cookies import cookiejar_from_dict -from .structures import CaseInsensitiveDict from .exceptions import ( - InvalidURL, InvalidHeader, FileModeWarning, UnrewindableBodyError) + FileModeWarning, + InvalidHeader, + InvalidURL, + UnrewindableBodyError, +) +from .structures import CaseInsensitiveDict -NETRC_FILES = ('.netrc', '_netrc') +NETRC_FILES = (".netrc", "_netrc") DEFAULT_CA_BUNDLE_PATH = certs.where() -DEFAULT_PORTS = {'http': 80, 'https': 443} +DEFAULT_PORTS = {"http": 80, "https": 443} + +# Ensure that ', ' is used to preserve previous delimiter behavior. +DEFAULT_ACCEPT_ENCODING = ", ".join( + re.split(r",\s*", make_headers(accept_encoding=True)["accept-encoding"]) +) -if sys.platform == 'win32': +if sys.platform == "win32": # provide a proxy_bypass version on Windows without DNS lookups def proxy_bypass_registry(host): try: - if is_py3: - import winreg - else: - import _winreg as winreg + import winreg except ImportError: return False try: - internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER, - r'Software\Microsoft\Windows\CurrentVersion\Internet Settings') + internetSettings = winreg.OpenKey( + winreg.HKEY_CURRENT_USER, + r"Software\Microsoft\Windows\CurrentVersion\Internet Settings", + ) # ProxyEnable could be REG_SZ or REG_DWORD, normalizing it - proxyEnable = int(winreg.QueryValueEx(internetSettings, - 'ProxyEnable')[0]) + proxyEnable = int(winreg.QueryValueEx(internetSettings, "ProxyEnable")[0]) # ProxyOverride is almost always a string - proxyOverride = winreg.QueryValueEx(internetSettings, - 'ProxyOverride')[0] - except OSError: + proxyOverride = winreg.QueryValueEx(internetSettings, "ProxyOverride")[0] + except (OSError, ValueError): return False if not proxyEnable or not proxyOverride: return False @@ -71,15 +96,15 @@ if sys.platform == 'win32': # make a check value list from the registry entry: replace the # '<local>' string by the localhost entry and the corresponding # canonical entry. - proxyOverride = proxyOverride.split(';') + proxyOverride = proxyOverride.split(";") # now check if we match one of the registry values. for test in proxyOverride: - if test == '<local>': - if '.' not in host: + if test == "<local>": + if "." 
not in host: return True - test = test.replace(".", r"\.") # mask dots - test = test.replace("*", r".*") # change glob sequence - test = test.replace("?", r".") # change glob char + test = test.replace(".", r"\.") # mask dots + test = test.replace("*", r".*") # change glob sequence + test = test.replace("?", r".") # change glob char if re.match(test, host, re.I): return True return False @@ -99,7 +124,7 @@ if sys.platform == 'win32': def dict_to_sequence(d): """Returns an internal sequence dictionary update.""" - if hasattr(d, 'items'): + if hasattr(d, "items"): d = d.items() return d @@ -109,37 +134,42 @@ def super_len(o): total_length = None current_position = 0 - if hasattr(o, '__len__'): + if hasattr(o, "__len__"): total_length = len(o) - elif hasattr(o, 'len'): + elif hasattr(o, "len"): total_length = o.len - elif hasattr(o, 'fileno'): + elif hasattr(o, "fileno"): try: fileno = o.fileno() - except io.UnsupportedOperation: + except (io.UnsupportedOperation, AttributeError): + # AttributeError is a surprising exception, seeing as how we've just checked + # that `hasattr(o, 'fileno')`. It happens for objects obtained via + # `Tarfile.extractfile()`, per issue 5229. pass else: total_length = os.fstat(fileno).st_size # Having used fstat to determine the file length, we need to # confirm that this file was opened up in binary mode. - if 'b' not in o.mode: - warnings.warn(( - "Requests has determined the content-length for this " - "request using the binary size of the file: however, the " - "file has been opened in text mode (i.e. without the 'b' " - "flag in the mode). This may lead to an incorrect " - "content-length. In Requests 3.0, support will be removed " - "for files in text mode."), - FileModeWarning + if "b" not in o.mode: + warnings.warn( + ( + "Requests has determined the content-length for this " + "request using the binary size of the file: however, the " + "file has been opened in text mode (i.e. without the 'b' " + "flag in the mode). This may lead to an incorrect " + "content-length. In Requests 3.0, support will be removed " + "for files in text mode." + ), + FileModeWarning, ) - if hasattr(o, 'tell'): + if hasattr(o, "tell"): try: current_position = o.tell() - except (OSError, IOError): + except OSError: # This can happen in some weird situations, such as when the file # is actually a special file descriptor like stdin. 
In this # instance, we don't know what the length is, so set it to zero and @@ -147,8 +177,8 @@ def super_len(o): if total_length is not None: current_position = total_length else: - if hasattr(o, 'seek') and total_length is None: - # StringIO and BytesIO have seek but no useable fileno + if hasattr(o, "seek") and total_length is None: + # StringIO and BytesIO have seek but no usable fileno try: # seek to end of file o.seek(0, 2) @@ -157,7 +187,7 @@ def super_len(o): # seek back to current position to support # partially read file-like objects o.seek(current_position or 0) - except (OSError, IOError): + except OSError: total_length = 0 if total_length is None: @@ -169,14 +199,14 @@ def super_len(o): def get_netrc_auth(url, raise_errors=False): """Returns the Requests tuple auth for a given url from netrc.""" - netrc_file = os.environ.get('NETRC') + netrc_file = os.environ.get("NETRC") if netrc_file is not None: netrc_locations = (netrc_file,) else: - netrc_locations = ('~/{}'.format(f) for f in NETRC_FILES) + netrc_locations = (f"~/{f}" for f in NETRC_FILES) try: - from netrc import netrc, NetrcParseError + from netrc import NetrcParseError, netrc netrc_path = None @@ -201,18 +231,18 @@ def get_netrc_auth(url, raise_errors=False): # Strip port numbers from netloc. This weird `if...encode`` dance is # used for Python 3.2, which doesn't support unicode literals. - splitstr = b':' + splitstr = b":" if isinstance(url, str): - splitstr = splitstr.decode('ascii') + splitstr = splitstr.decode("ascii") host = ri.netloc.split(splitstr)[0] try: _netrc = netrc(netrc_path).authenticators(host) if _netrc: # Return with login / password - login_i = (0 if _netrc[0] else 1) + login_i = 0 if _netrc[0] else 1 return (_netrc[login_i], _netrc[2]) - except (NetrcParseError, IOError): + except (NetrcParseError, OSError): # If there was a parsing error or a permissions issue reading the file, # we'll just skip netrc auth unless explicitly asked to raise errors. 
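# Example (illustrative sketch, not part of the patch): get_netrc_auth above
# reads ~/.netrc (or $NETRC) and returns a (login, password) tuple suitable
# for the `auth` argument. The machine entry is made up:
#
#     machine api.example.com login alice password s3cret
#
from pip._vendor.requests.utils import get_netrc_auth

print(get_netrc_auth("https://api.example.com/v1/"))  # ('alice', 's3cret')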
if raise_errors: @@ -225,9 +255,8 @@ def get_netrc_auth(url, raise_errors=False): def guess_filename(obj): """Tries to guess the filename of the given object.""" - name = getattr(obj, 'name', None) - if (name and isinstance(name, basestring) and name[0] != '<' and - name[-1] != '>'): + name = getattr(obj, "name", None) + if name and isinstance(name, basestring) and name[0] != "<" and name[-1] != ">": return os.path.basename(name) @@ -245,7 +274,11 @@ def extract_zipped_paths(path): archive, member = os.path.split(path) while archive and not os.path.exists(archive): archive, prefix = os.path.split(archive) - member = '/'.join([prefix, member]) + if not prefix: + # If we don't check for an empty prefix after the split (in other words, archive remains unchanged after the split), + # we _can_ end up in an infinite loop on a rare corner case affecting a small number of users + break + member = "/".join([prefix, member]) if not zipfile.is_zipfile(archive): return path @@ -256,13 +289,27 @@ def extract_zipped_paths(path): # we have a valid zip archive and a valid member of that archive tmp = tempfile.gettempdir() - extracted_path = os.path.join(tmp, *member.split('/')) + extracted_path = os.path.join(tmp, member.split("/")[-1]) if not os.path.exists(extracted_path): - extracted_path = zip_file.extract(member, path=tmp) - + # use read + write to avoid the creating nested folders, we only want the file, avoids mkdir racing condition + with atomic_open(extracted_path) as file_handler: + file_handler.write(zip_file.read(member)) return extracted_path +@contextlib.contextmanager +def atomic_open(filename): + """Write a file to the disk in an atomic fashion""" + tmp_descriptor, tmp_name = tempfile.mkstemp(dir=os.path.dirname(filename)) + try: + with os.fdopen(tmp_descriptor, "wb") as tmp_handler: + yield tmp_handler + os.replace(tmp_name, filename) + except BaseException: + os.remove(tmp_name) + raise + + def from_key_val_list(value): """Take an object and test to see if it can be represented as a dictionary. Unless it can not be represented as such, return an @@ -285,7 +332,7 @@ def from_key_val_list(value): return None if isinstance(value, (str, bytes, bool, int)): - raise ValueError('cannot encode objects that are not 2-tuples') + raise ValueError("cannot encode objects that are not 2-tuples") return OrderedDict(value) @@ -311,7 +358,7 @@ def to_key_val_list(value): return None if isinstance(value, (str, bytes, bool, int)): - raise ValueError('cannot encode objects that are not 2-tuples') + raise ValueError("cannot encode objects that are not 2-tuples") if isinstance(value, Mapping): value = value.items() @@ -376,10 +423,10 @@ def parse_dict_header(value): """ result = {} for item in _parse_list_header(value): - if '=' not in item: + if "=" not in item: result[item] = None continue - name, value = item.split('=', 1) + name, value = item.split("=", 1) if value[:1] == value[-1:] == '"': value = unquote_header_value(value[1:-1]) result[name] = value @@ -407,8 +454,8 @@ def unquote_header_value(value, is_filename=False): # replace sequence below on a UNC path has the effect of turning # the leading double slash into a single slash and then # _fix_ie_filename() doesn't work correctly. See #458. 
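# Example (illustrative sketch, not part of the patch): atomic_open above
# stages writes in a temp file and os.replace()s it into place, so a
# concurrent reader never observes a half-written file. The path is made up:
from pip._vendor.requests.utils import atomic_open

with atomic_open("/tmp/extracted-cert.pem") as fh:
    fh.write(b"-----BEGIN CERTIFICATE-----\n")  # binary-mode handle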
- if not is_filename or value[:2] != '\\\\': - return value.replace('\\\\', '\\').replace('\\"', '"') + if not is_filename or value[:2] != "\\\\": + return value.replace("\\\\", "\\").replace('\\"', '"') return value @@ -443,19 +490,24 @@ def get_encodings_from_content(content): :param content: bytestring to extract encodings from. """ - warnings.warn(( - 'In requests 3.0, get_encodings_from_content will be removed. For ' - 'more information, please see the discussion on issue #2266. (This' - ' warning should only appear once.)'), - DeprecationWarning) + warnings.warn( + ( + "In requests 3.0, get_encodings_from_content will be removed. For " + "more information, please see the discussion on issue #2266. (This" + " warning should only appear once.)" + ), + DeprecationWarning, + ) charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I) pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I) xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]') - return (charset_re.findall(content) + - pragma_re.findall(content) + - xml_re.findall(content)) + return ( + charset_re.findall(content) + + pragma_re.findall(content) + + xml_re.findall(content) + ) def _parse_content_type_header(header): @@ -466,7 +518,7 @@ def _parse_content_type_header(header): parameters """ - tokens = header.split(';') + tokens = header.split(";") content_type, params = tokens[0].strip(), tokens[1:] params_dict = {} items_to_strip = "\"' " @@ -478,7 +530,7 @@ def _parse_content_type_header(header): index_of_equals = param.find("=") if index_of_equals != -1: key = param[:index_of_equals].strip(items_to_strip) - value = param[index_of_equals + 1:].strip(items_to_strip) + value = param[index_of_equals + 1 :].strip(items_to_strip) params_dict[key.lower()] = value return content_type, params_dict @@ -490,38 +542,37 @@ def get_encoding_from_headers(headers): :rtype: str """ - content_type = headers.get('content-type') + content_type = headers.get("content-type") if not content_type: return None content_type, params = _parse_content_type_header(content_type) - if 'charset' in params: - return params['charset'].strip("'\"") + if "charset" in params: + return params["charset"].strip("'\"") - if 'text' in content_type: - return 'ISO-8859-1' + if "text" in content_type: + return "ISO-8859-1" - if 'application/json' in content_type: + if "application/json" in content_type: # Assume UTF-8 based on RFC 4627: https://www.ietf.org/rfc/rfc4627.txt since the charset was unset - return 'utf-8' + return "utf-8" def stream_decode_response_unicode(iterator, r): - """Stream decodes a iterator.""" + """Stream decodes an iterator.""" if r.encoding is None: - for item in iterator: - yield item + yield from iterator return - decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace') + decoder = codecs.getincrementaldecoder(r.encoding)(errors="replace") for chunk in iterator: rv = decoder.decode(chunk) if rv: yield rv - rv = decoder.decode(b'', final=True) + rv = decoder.decode(b"", final=True) if rv: yield rv @@ -532,7 +583,7 @@ def iter_slices(string, slice_length): if slice_length is None or slice_length <= 0: slice_length = len(string) while pos < len(string): - yield string[pos:pos + slice_length] + yield string[pos : pos + slice_length] pos += slice_length @@ -548,11 +599,14 @@ def get_unicode_from_response(r): :rtype: str """ - warnings.warn(( - 'In requests 3.0, get_unicode_from_response will be removed. For ' - 'more information, please see the discussion on issue #2266. 
(This' - ' warning should only appear once.)'), - DeprecationWarning) + warnings.warn( + ( + "In requests 3.0, get_unicode_from_response will be removed. For " + "more information, please see the discussion on issue #2266. (This" + " warning should only appear once.)" + ), + DeprecationWarning, + ) tried_encodings = [] @@ -567,14 +621,15 @@ def get_unicode_from_response(r): # Fall back: try: - return str(r.content, encoding, errors='replace') + return str(r.content, encoding, errors="replace") except TypeError: return r.content # The unreserved URI characters (RFC 3986) UNRESERVED_SET = frozenset( - "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~") + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~" +) def unquote_unreserved(uri): @@ -583,22 +638,22 @@ def unquote_unreserved(uri): :rtype: str """ - parts = uri.split('%') + parts = uri.split("%") for i in range(1, len(parts)): h = parts[i][0:2] if len(h) == 2 and h.isalnum(): try: c = chr(int(h, 16)) except ValueError: - raise InvalidURL("Invalid percent-escape sequence: '%s'" % h) + raise InvalidURL(f"Invalid percent-escape sequence: '{h}'") if c in UNRESERVED_SET: parts[i] = c + parts[i][2:] else: - parts[i] = '%' + parts[i] + parts[i] = f"%{parts[i]}" else: - parts[i] = '%' + parts[i] - return ''.join(parts) + parts[i] = f"%{parts[i]}" + return "".join(parts) def requote_uri(uri): @@ -631,10 +686,10 @@ def address_in_network(ip, net): :rtype: bool """ - ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0] - netaddr, bits = net.split('/') - netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0] - network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask + ipaddr = struct.unpack("=L", socket.inet_aton(ip))[0] + netaddr, bits = net.split("/") + netmask = struct.unpack("=L", socket.inet_aton(dotted_netmask(int(bits))))[0] + network = struct.unpack("=L", socket.inet_aton(netaddr))[0] & netmask return (ipaddr & netmask) == (network & netmask) @@ -645,8 +700,8 @@ def dotted_netmask(mask): :rtype: str """ - bits = 0xffffffff ^ (1 << 32 - mask) - 1 - return socket.inet_ntoa(struct.pack('>I', bits)) + bits = 0xFFFFFFFF ^ (1 << 32 - mask) - 1 + return socket.inet_ntoa(struct.pack(">I", bits)) def is_ipv4_address(string_ip): @@ -655,7 +710,7 @@ def is_ipv4_address(string_ip): """ try: socket.inet_aton(string_ip) - except socket.error: + except OSError: return False return True @@ -666,9 +721,9 @@ def is_valid_cidr(string_network): :rtype: bool """ - if string_network.count('/') == 1: + if string_network.count("/") == 1: try: - mask = int(string_network.split('/')[1]) + mask = int(string_network.split("/")[1]) except ValueError: return False @@ -676,8 +731,8 @@ def is_valid_cidr(string_network): return False try: - socket.inet_aton(string_network.split('/')[0]) - except socket.error: + socket.inet_aton(string_network.split("/")[0]) + except OSError: return False else: return False @@ -714,13 +769,14 @@ def should_bypass_proxies(url, no_proxy): """ # Prioritize lowercase environment variables over uppercase # to keep a consistent behaviour with other http projects (curl, wget). - get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper()) + def get_proxy(key): + return os.environ.get(key) or os.environ.get(key.upper()) # First check whether no_proxy is defined. If it is, check that the URL # we're getting isn't in the no_proxy list. 
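# Sketch (not from the patch): the proxy-bypass code above decides CIDR
# membership with plain 32-bit mask arithmetic; here is the same logic as a
# runnable worked example, with invented sample addresses.
import socket
import struct

def dotted_netmask(mask):
    # /24 -> 0xFFFFFF00 -> "255.255.255.0"
    bits = 0xFFFFFFFF ^ (1 << 32 - mask) - 1
    return socket.inet_ntoa(struct.pack(">I", bits))

def address_in_network(ip, net):
    # Pack both addresses into unsigned 32-bit ints and compare network prefixes.
    ipaddr = struct.unpack("=L", socket.inet_aton(ip))[0]
    netaddr, bits = net.split("/")
    netmask = struct.unpack("=L", socket.inet_aton(dotted_netmask(int(bits))))[0]
    network = struct.unpack("=L", socket.inet_aton(netaddr))[0] & netmask
    return (ipaddr & netmask) == (network & netmask)

print(dotted_netmask(24))                                   # 255.255.255.0
print(address_in_network("192.168.1.7", "192.168.1.0/24"))  # True
print(address_in_network("10.0.0.1", "192.168.1.0/24"))     # False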
no_proxy_arg = no_proxy if no_proxy is None: - no_proxy = get_proxy('no_proxy') + no_proxy = get_proxy("no_proxy") parsed = urlparse(url) if parsed.hostname is None: @@ -730,9 +786,7 @@ def should_bypass_proxies(url, no_proxy): if no_proxy: # We need to check whether we match here. We need to see if we match # the end of the hostname, both with and without the port. - no_proxy = ( - host for host in no_proxy.replace(' ', '').split(',') if host - ) + no_proxy = (host for host in no_proxy.replace(" ", "").split(",") if host) if is_ipv4_address(parsed.hostname): for proxy_ip in no_proxy: @@ -746,7 +800,7 @@ def should_bypass_proxies(url, no_proxy): else: host_with_port = parsed.hostname if parsed.port: - host_with_port += ':{}'.format(parsed.port) + host_with_port += f":{parsed.port}" for host in no_proxy: if parsed.hostname.endswith(host) or host_with_port.endswith(host): @@ -754,7 +808,7 @@ def should_bypass_proxies(url, no_proxy): # to apply the proxies on this URL. return True - with set_environ('no_proxy', no_proxy_arg): + with set_environ("no_proxy", no_proxy_arg): # parsed.hostname can be `None` in cases such as a file URI. try: bypass = proxy_bypass(parsed.hostname) @@ -788,13 +842,13 @@ def select_proxy(url, proxies): proxies = proxies or {} urlparts = urlparse(url) if urlparts.hostname is None: - return proxies.get(urlparts.scheme, proxies.get('all')) + return proxies.get(urlparts.scheme, proxies.get("all")) proxy_keys = [ - urlparts.scheme + '://' + urlparts.hostname, + urlparts.scheme + "://" + urlparts.hostname, urlparts.scheme, - 'all://' + urlparts.hostname, - 'all', + "all://" + urlparts.hostname, + "all", ] proxy = None for proxy_key in proxy_keys: @@ -805,25 +859,54 @@ def select_proxy(url, proxies): return proxy +def resolve_proxies(request, proxies, trust_env=True): + """This method takes proxy information from a request and configuration + input to resolve a mapping of target proxies. This will consider settings + such a NO_PROXY to strip proxy configurations. + + :param request: Request or PreparedRequest + :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs + :param trust_env: Boolean declaring whether to trust environment configs + + :rtype: dict + """ + proxies = proxies if proxies is not None else {} + url = request.url + scheme = urlparse(url).scheme + no_proxy = proxies.get("no_proxy") + new_proxies = proxies.copy() + + if trust_env and not should_bypass_proxies(url, no_proxy=no_proxy): + environ_proxies = get_environ_proxies(url, no_proxy=no_proxy) + + proxy = environ_proxies.get(scheme, environ_proxies.get("all")) + + if proxy: + new_proxies.setdefault(scheme, proxy) + return new_proxies + + def default_user_agent(name="python-requests"): """ Return a string representing the default user agent. 
:rtype: str """ - return '%s/%s' % (name, __version__) + return f"{name}/{__version__}" def default_headers(): """ :rtype: requests.structures.CaseInsensitiveDict """ - return CaseInsensitiveDict({ - 'User-Agent': default_user_agent(), - 'Accept-Encoding': ', '.join(('gzip', 'deflate')), - 'Accept': '*/*', - 'Connection': 'keep-alive', - }) + return CaseInsensitiveDict( + { + "User-Agent": default_user_agent(), + "Accept-Encoding": DEFAULT_ACCEPT_ENCODING, + "Accept": "*/*", + "Connection": "keep-alive", + } + ) def parse_header_links(value): @@ -836,23 +919,23 @@ def parse_header_links(value): links = [] - replace_chars = ' \'"' + replace_chars = " '\"" value = value.strip(replace_chars) if not value: return links - for val in re.split(', *<', value): + for val in re.split(", *<", value): try: - url, params = val.split(';', 1) + url, params = val.split(";", 1) except ValueError: - url, params = val, '' + url, params = val, "" - link = {'url': url.strip('<> \'"')} + link = {"url": url.strip("<> '\"")} - for param in params.split(';'): + for param in params.split(";"): try: - key, value = param.split('=') + key, value = param.split("=") except ValueError: break @@ -864,7 +947,7 @@ def parse_header_links(value): # Null bytes; no need to recreate these on each call to guess_json_utf -_null = '\x00'.encode('ascii') # encoding to ASCII for Python 3 +_null = "\x00".encode("ascii") # encoding to ASCII for Python 3 _null2 = _null * 2 _null3 = _null * 3 @@ -878,25 +961,25 @@ def guess_json_utf(data): # determine the encoding. Also detect a BOM, if present. sample = data[:4] if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE): - return 'utf-32' # BOM included + return "utf-32" # BOM included if sample[:3] == codecs.BOM_UTF8: - return 'utf-8-sig' # BOM included, MS style (discouraged) + return "utf-8-sig" # BOM included, MS style (discouraged) if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE): - return 'utf-16' # BOM included + return "utf-16" # BOM included nullcount = sample.count(_null) if nullcount == 0: - return 'utf-8' + return "utf-8" if nullcount == 2: - if sample[::2] == _null2: # 1st and 3rd are null - return 'utf-16-be' + if sample[::2] == _null2: # 1st and 3rd are null + return "utf-16-be" if sample[1::2] == _null2: # 2nd and 4th are null - return 'utf-16-le' + return "utf-16-le" # Did not detect 2 valid UTF-16 ascii-range characters if nullcount == 3: if sample[:3] == _null3: - return 'utf-32-be' + return "utf-32-be" if sample[1:] == _null3: - return 'utf-32-le' + return "utf-32-le" # Did not detect a valid UTF-32 ascii-range character return None @@ -907,15 +990,27 @@ def prepend_scheme_if_needed(url, new_scheme): :rtype: str """ - scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme) - - # urlparse is a finicky beast, and sometimes decides that there isn't a - # netloc present. Assume that it's being over-cautious, and switch netloc - # and path if urlparse decided there was no netloc. + parsed = parse_url(url) + scheme, auth, host, port, path, query, fragment = parsed + + # A defect in urlparse determines that there isn't a netloc present in some + # urls. We previously assumed parsing was overly cautious, and swapped the + # netloc and path. Due to a lack of tests on the original defect, this is + # maintained with parse_url for backwards compatibility. 
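# Sketch (not from the patch): the netloc/path swap retained below compensates
# for a stdlib quirk -- without a scheme or a leading "//", urlparse() files the
# host under path and leaves netloc empty. A standalone demonstration with an
# invented URL:
from urllib.parse import urlparse

parsed = urlparse("example.com/pub")
print(repr(parsed.netloc), repr(parsed.path))  # '' 'example.com/pub'

parsed = urlparse("//example.com/pub")         # netloc needs the '//' marker
print(repr(parsed.netloc), repr(parsed.path))  # 'example.com' '/pub'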
+ netloc = parsed.netloc if not netloc: netloc, path = path, netloc - return urlunparse((scheme, netloc, path, params, query, fragment)) + if auth: + # parse_url doesn't provide the netloc with auth + # so we'll add it ourselves. + netloc = "@".join([auth, netloc]) + if scheme is None: + scheme = new_scheme + if path is None: + path = "" + + return urlunparse((scheme, netloc, path, "", query, fragment)) def get_auth_from_url(url): @@ -929,35 +1024,39 @@ try: auth = (unquote(parsed.username), unquote(parsed.password)) except (AttributeError, TypeError): - auth = ('', '') + auth = ("", "") return auth -# Moved outside of function to avoid recompile every call -_CLEAN_HEADER_REGEX_BYTE = re.compile(b'^\\S[^\\r\\n]*$|^$') -_CLEAN_HEADER_REGEX_STR = re.compile(r'^\S[^\r\n]*$|^$') - - def check_header_validity(header): - """Verifies that header value is a string which doesn't contain - leading whitespace or return characters. This prevents unintended - header injection. + """Verifies that header parts don't contain leading whitespace, + reserved characters, or return characters. :param header: tuple, in the format (name, value). """ name, value = header + _validate_header_part(header, name, 0) + _validate_header_part(header, value, 1) - if isinstance(value, bytes): - pat = _CLEAN_HEADER_REGEX_BYTE + +def _validate_header_part(header, header_part, header_validator_index): + if isinstance(header_part, str): + validator = _HEADER_VALIDATORS_STR[header_validator_index] + elif isinstance(header_part, bytes): + validator = _HEADER_VALIDATORS_BYTE[header_validator_index] else: - pat = _CLEAN_HEADER_REGEX_STR - try: - if not pat.match(value): - raise InvalidHeader("Invalid return character or leading space in header: %s" % name) - except TypeError: - raise InvalidHeader("Value for header {%s: %s} must be of type str or " - "bytes, not %s" % (name, value, type(value))) + raise InvalidHeader( + f"Header part ({header_part!r}) from {header} " + f"must be of type str or bytes, not {type(header_part)}" + ) + + if not validator.match(header_part): + header_kind = "name" if header_validator_index == 0 else "value" + raise InvalidHeader( + f"Invalid leading whitespace, reserved character(s), or return " + f"character(s) in header {header_kind}: {header_part!r}" + ) def urldefragauth(url): @@ -972,21 +1071,24 @@ if not netloc: netloc, path = path, netloc - netloc = netloc.rsplit('@', 1)[-1] + netloc = netloc.rsplit("@", 1)[-1] - return urlunparse((scheme, netloc, path, params, query, '')) + return urlunparse((scheme, netloc, path, params, query, "")) def rewind_body(prepared_request): """Move file pointer back to its recorded starting position so it can be read again on redirect. """ - body_seek = getattr(prepared_request.body, 'seek', None) - if body_seek is not None and isinstance(prepared_request._body_position, integer_types): + body_seek = getattr(prepared_request.body, "seek", None) + if body_seek is not None and isinstance( + prepared_request._body_position, integer_types + ): try: body_seek(prepared_request._body_position) - except (IOError, OSError): - raise UnrewindableBodyError("An error occurred when rewinding request " - "body for redirect.") + except OSError: + raise UnrewindableBodyError( + "An error occurred when rewinding request body for redirect."
+ ) else: raise UnrewindableBodyError("Unable to rewind request body for redirect.") diff --git a/env/Lib/site-packages/pip/_vendor/resolvelib/__init__.py b/env/Lib/site-packages/pip/_vendor/resolvelib/__init__.py index 184874d45cdbfb3d18c40bc35671e85820fc14b0..d92acc7bedfc5c7c05130986a256e610640582e5 100644 --- a/env/Lib/site-packages/pip/_vendor/resolvelib/__init__.py +++ b/env/Lib/site-packages/pip/_vendor/resolvelib/__init__.py @@ -11,7 +11,7 @@ __all__ = [ "ResolutionTooDeep", ] -__version__ = "0.7.0" +__version__ = "1.0.1" from .providers import AbstractProvider, AbstractResolver @@ -19,8 +19,8 @@ from .reporters import BaseReporter from .resolvers import ( InconsistentCandidate, RequirementsConflicted, - Resolver, ResolutionError, ResolutionImpossible, ResolutionTooDeep, + Resolver, ) diff --git a/env/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 5a268e0cfff4e1eb6cafb28063ccd8e8d5d92873..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/providers.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/providers.cpython-39.pyc deleted file mode 100644 index 062c18f3b7919be73adeafc1122bddc10087f147..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/providers.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/reporters.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/reporters.cpython-39.pyc deleted file mode 100644 index a31d59288bba0543ec81a1c7891ef794f7a3dc2b..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/reporters.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/resolvers.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/resolvers.cpython-39.pyc deleted file mode 100644 index 9d6f161cfb869ef1558152154011fdc9211bdc48..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/resolvers.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/structs.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/structs.cpython-39.pyc deleted file mode 100644 index 6025f29b6d2f876eeabfd334189507ebaadc5546..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/resolvelib/__pycache__/structs.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/resolvelib/compat/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/resolvelib/compat/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index f0977531d64d31c96761d816b28b8dd3c8bad8a0..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/resolvelib/compat/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/resolvelib/compat/__pycache__/collections_abc.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/resolvelib/compat/__pycache__/collections_abc.cpython-39.pyc deleted file mode 100644 index 
9f522103d7206be983e670eef634e210c2f0c811..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/resolvelib/compat/__pycache__/collections_abc.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/resolvelib/providers.py b/env/Lib/site-packages/pip/_vendor/resolvelib/providers.py index 4822d166551d986bfbc254c0972657cba2a6d883..e99d87ee75f6f665989a109828e07ef81cb3410c 100644 --- a/env/Lib/site-packages/pip/_vendor/resolvelib/providers.py +++ b/env/Lib/site-packages/pip/_vendor/resolvelib/providers.py @@ -1,5 +1,5 @@ class AbstractProvider(object): - """Delegate class to provide requirement interface for the resolver.""" + """Delegate class to provide the required interface for the resolver.""" def identify(self, requirement_or_candidate): """Given a requirement, return an identifier for it. @@ -9,7 +9,14 @@ class AbstractProvider(object): """ raise NotImplementedError - def get_preference(self, identifier, resolutions, candidates, information): + def get_preference( + self, + identifier, + resolutions, + candidates, + information, + backtrack_causes, + ): """Produce a sort key for given requirement based on preference. The preference is defined as "I think this requirement should be @@ -17,23 +24,25 @@ class AbstractProvider(object): this group of arguments is. :param identifier: An identifier as returned by ``identify()``. This - identifies the dependency matches of which should be returned. + identifies the dependency matches which should be returned. :param resolutions: Mapping of candidates currently pinned by the - resolver. Each key is an identifier, and the value a candidate. + resolver. Each key is an identifier, and the value is a candidate. The candidate may conflict with requirements from ``information``. :param candidates: Mapping of each dependency's possible candidates. Each value is an iterator of candidates. :param information: Mapping of requirement information of each package. Each value is an iterator of *requirement information*. + :param backtrack_causes: Sequence of requirement information that were + the requirements that caused the resolver to most recently backtrack. A *requirement information* instance is a named tuple with two members: * ``requirement`` specifies a requirement contributing to the current list of candidates. - * ``parent`` specifies the candidate that provides (dependend on) the + * ``parent`` specifies the candidate that provides (depended on) the requirement, or ``None`` to indicate a root requirement. - The preference could depend on a various of issues, including (not + The preference could depend on various issues, including (not necessarily in this order): * Is this package pinned in the current resolution result? @@ -52,7 +61,7 @@ class AbstractProvider(object): raise NotImplementedError def find_matches(self, identifier, requirements, incompatibilities): - """Find all possible candidates that satisfy given constraints. + """Find all possible candidates that satisfy the given constraints. :param identifier: An identifier as returned by ``identify()``. This identifies the dependency matches of which should be returned. @@ -83,7 +92,7 @@ class AbstractProvider(object): def is_satisfied_by(self, requirement, candidate): """Whether the given requirement can be satisfied by a candidate. - The candidate is guarenteed to have been generated from the + The candidate is guaranteed to have been generated from the requirement. 
A boolean should be returned to indicate whether ``candidate`` is a diff --git a/env/Lib/site-packages/pip/_vendor/resolvelib/reporters.py b/env/Lib/site-packages/pip/_vendor/resolvelib/reporters.py index 563489e133b01e8162114e077ba9ea026dcad55b..688b5e10d8608fdb324c5df0ec3d9f4aa720de0e 100644 --- a/env/Lib/site-packages/pip/_vendor/resolvelib/reporters.py +++ b/env/Lib/site-packages/pip/_vendor/resolvelib/reporters.py @@ -30,7 +30,13 @@ class BaseReporter(object): requirements passed in from ``Resolver.resolve()``. """ - def backtracking(self, candidate): + def resolving_conflicts(self, causes): + """Called when starting to attempt requirement conflict resolution. + + :param causes: The information on the collision that caused the backtracking. + """ + + def rejecting_candidate(self, criterion, candidate): """Called when rejecting a candidate during backtracking.""" def pinning(self, candidate): diff --git a/env/Lib/site-packages/pip/_vendor/resolvelib/resolvers.py b/env/Lib/site-packages/pip/_vendor/resolvelib/resolvers.py index 99ee10516b830cfa6ac2261329716fb87544bbc4..2c3d0e306f91f9dfac1843b40babd223766bbf50 100644 --- a/env/Lib/site-packages/pip/_vendor/resolvelib/resolvers.py +++ b/env/Lib/site-packages/pip/_vendor/resolvelib/resolvers.py @@ -1,10 +1,10 @@ import collections +import itertools import operator from .providers import AbstractResolver from .structs import DirectedGraph, IteratorMapping, build_iter_view - RequirementInformation = collections.namedtuple( "RequirementInformation", ["requirement", "parent"] ) @@ -99,7 +99,7 @@ class ResolutionTooDeep(ResolutionError): # Resolution state in a round. -State = collections.namedtuple("State", "mapping criteria") +State = collections.namedtuple("State", "mapping criteria backtrack_causes") class Resolution(object): @@ -131,14 +131,15 @@ class Resolution(object): state = State( mapping=base.mapping.copy(), criteria=base.criteria.copy(), + backtrack_causes=base.backtrack_causes[:], ) self._states.append(state) - def _merge_into_criterion(self, requirement, parent): + def _add_to_criteria(self, criteria, requirement, parent): self._r.adding_requirement(requirement=requirement, parent=parent) identifier = self._p.identify(requirement_or_candidate=requirement) - criterion = self.state.criteria.get(identifier) + criterion = criteria.get(identifier) if criterion: incompatibilities = list(criterion.incompatibilities) else: @@ -147,12 +148,12 @@ class Resolution(object): matches = self._p.find_matches( identifier=identifier, requirements=IteratorMapping( - self.state.criteria, + criteria, operator.methodcaller("iter_requirement"), {identifier: [requirement]}, ), incompatibilities=IteratorMapping( - self.state.criteria, + criteria, operator.attrgetter("incompatibilities"), {identifier: incompatibilities}, ), @@ -171,7 +172,32 @@ class Resolution(object): ) if not criterion.candidates: raise RequirementsConflicted(criterion) - return identifier, criterion + criteria[identifier] = criterion + + def _remove_information_from_criteria(self, criteria, parents): + """Remove information from parents of criteria. + + Concretely, removes all values from each criterion's ``information`` + field that have one of ``parents`` as provider of the requirement. + + :param criteria: The criteria to update. + :param parents: Identifiers for which to remove information from all criteria. 
+ """ + if not parents: + return + for key, criterion in criteria.items(): + criteria[key] = Criterion( + criterion.candidates, + [ + information + for information in criterion.information + if ( + information.parent is None + or self._p.identify(information.parent) not in parents + ) + ], + criterion.incompatibilities, + ) def _get_preference(self, name): return self._p.get_preference( @@ -185,6 +211,7 @@ class Resolution(object): self.state.criteria, operator.attrgetter("information"), ), + backtrack_causes=self.state.backtrack_causes, ) def _is_current_pin_satisfying(self, name, criterion): @@ -197,11 +224,10 @@ class Resolution(object): for r in criterion.iter_requirement() ) - def _get_criteria_to_update(self, candidate): - criteria = {} - for r in self._p.get_dependencies(candidate=candidate): - name, crit = self._merge_into_criterion(r, parent=candidate) - criteria[name] = crit + def _get_updated_criteria(self, candidate): + criteria = self.state.criteria.copy() + for requirement in self._p.get_dependencies(candidate=candidate): + self._add_to_criteria(criteria, requirement, parent=candidate) return criteria def _attempt_to_pin_criterion(self, name): @@ -210,8 +236,9 @@ class Resolution(object): causes = [] for candidate in criterion.candidates: try: - criteria = self._get_criteria_to_update(candidate) + criteria = self._get_updated_criteria(candidate) except RequirementsConflicted as e: + self._r.rejecting_candidate(e.criterion, candidate) causes.append(e.criterion) continue @@ -226,12 +253,13 @@ class Resolution(object): if not satisfied: raise InconsistentCandidate(candidate, criterion) + self._r.pinning(candidate=candidate) + self.state.criteria.update(criteria) + # Put newly-pinned candidate at the end. This is essential because # backtracking looks at this mapping to get the last pin. - self._r.pinning(candidate=candidate) self.state.mapping.pop(name, None) self.state.mapping[name] = candidate - self.state.criteria.update(criteria) return [] @@ -239,8 +267,8 @@ class Resolution(object): # end, signal for backtracking. return causes - def _backtrack(self): - """Perform backtracking. + def _backjump(self, causes): + """Perform backjumping. When we enter here, the stack is like this:: @@ -256,22 +284,46 @@ class Resolution(object): Each iteration of the loop will: - 1. Discard Z. - 2. Discard Y but remember its incompatibility information gathered + 1. Identify Z. The incompatibility is not always caused by the latest + state. For example, given three requirements A, B and C, with + dependencies A1, B1 and C1, where A1 and B1 are incompatible: the + last state might be related to C, so we want to discard the + previous state. + 2. Discard Z. + 3. Discard Y but remember its incompatibility information gathered previously, and the failure we're dealing with right now. - 3. Push a new state Y' based on X, and apply the incompatibility + 4. Push a new state Y' based on X, and apply the incompatibility information from Y to Y'. - 4a. If this causes Y' to conflict, we need to backtrack again. Make Y' + 5a. If this causes Y' to conflict, we need to backtrack again. Make Y' the new Z and go back to step 2. - 4b. If the incompatibilities apply cleanly, end backtracking. + 5b. If the incompatibilities apply cleanly, end backtracking. 
""" + incompatible_reqs = itertools.chain( + (c.parent for c in causes if c.parent is not None), + (c.requirement for c in causes), + ) + incompatible_deps = {self._p.identify(r) for r in incompatible_reqs} while len(self._states) >= 3: # Remove the state that triggered backtracking. del self._states[-1] - # Retrieve the last candidate pin and known incompatibilities. - broken_state = self._states.pop() - name, candidate = broken_state.mapping.popitem() + # Ensure to backtrack to a state that caused the incompatibility + incompatible_state = False + while not incompatible_state: + # Retrieve the last candidate pin and known incompatibilities. + try: + broken_state = self._states.pop() + name, candidate = broken_state.mapping.popitem() + except (IndexError, KeyError): + raise ResolutionImpossible(causes) + current_dependencies = { + self._p.identify(d) + for d in self._p.get_dependencies(candidate) + } + incompatible_state = not current_dependencies.isdisjoint( + incompatible_deps + ) + incompatibilities_from_broken = [ (k, list(v.incompatibilities)) for k, v in broken_state.criteria.items() @@ -280,8 +332,6 @@ class Resolution(object): # Also mark the newly known incompatibility. incompatibilities_from_broken.append((name, [candidate])) - self._r.backtracking(candidate=candidate) - # Create a new state from the last known-to-work one, and apply # the previously gathered incompatibility information. def _patch_criteria(): @@ -335,13 +385,18 @@ class Resolution(object): self._r.starting() # Initialize the root state. - self._states = [State(mapping=collections.OrderedDict(), criteria={})] + self._states = [ + State( + mapping=collections.OrderedDict(), + criteria={}, + backtrack_causes=[], + ) + ] for r in requirements: try: - name, crit = self._merge_into_criterion(r, parent=None) + self._add_to_criteria(self.state.criteria, r, parent=None) except RequirementsConflicted as e: raise ResolutionImpossible(e.criterion.information) - self.state.criteria[name] = crit # The root state is saved as a sentinel so the first ever pin can have # something to backtrack to if it fails. The root state is basically @@ -362,20 +417,38 @@ class Resolution(object): self._r.ending(state=self.state) return self.state + # keep track of satisfied names to calculate diff after pinning + satisfied_names = set(self.state.criteria.keys()) - set( + unsatisfied_names + ) + # Choose the most preferred unpinned criterion to try. name = min(unsatisfied_names, key=self._get_preference) failure_causes = self._attempt_to_pin_criterion(name) if failure_causes: - # Backtrack if pinning fails. The backtrack process puts us in + causes = [i for c in failure_causes for i in c.information] + # Backjump if pinning fails. The backjump process puts us in # an unpinned state, so we can work on it in the next round. - success = self._backtrack() + self._r.resolving_conflicts(causes=causes) + success = self._backjump(causes) + self.state.backtrack_causes[:] = causes # Dead ends everywhere. Give up. 
if not success: - causes = [i for c in failure_causes for i in c.information] - raise ResolutionImpossible(causes) + raise ResolutionImpossible(self.state.backtrack_causes) else: + # discard as information sources any invalidated names + # (unsatisfied names that were previously satisfied) + newly_unsatisfied_names = { + key + for key, criterion in self.state.criteria.items() + if key in satisfied_names + and not self._is_current_pin_satisfying(key, criterion) + } + self._remove_information_from_criteria( + self.state.criteria, newly_unsatisfied_names + ) # Pinning was successful. Push a new state to do another pin. self._push_new_state() diff --git a/env/Lib/site-packages/pip/_vendor/resolvelib/structs.py b/env/Lib/site-packages/pip/_vendor/resolvelib/structs.py index e1e7aa429e3aac074587d55269e45e191124f90c..359a34f60187591c26ee46d60e49c136acd6c765 100644 --- a/env/Lib/site-packages/pip/_vendor/resolvelib/structs.py +++ b/env/Lib/site-packages/pip/_vendor/resolvelib/structs.py @@ -75,6 +75,18 @@ class IteratorMapping(collections_abc.Mapping): self._accessor = accessor self._appends = appends or {} + def __repr__(self): + return "IteratorMapping({!r}, {!r}, {!r})".format( + self._mapping, + self._accessor, + self._appends, + ) + + def __bool__(self): + return bool(self._mapping or self._appends) + + __nonzero__ = __bool__ # XXX: Python 2. + def __contains__(self, key): return key in self._mapping or key in self._appends @@ -90,7 +102,7 @@ class IteratorMapping(collections_abc.Mapping): return itertools.chain(self._mapping, more) def __len__(self): - more = len(k for k in self._appends if k not in self._mapping) + more = sum(1 for k in self._appends if k not in self._mapping) return len(self._mapping) + more @@ -105,13 +117,14 @@ class _FactoryIterableView(object): def __init__(self, factory): self._factory = factory + self._iterable = None def __repr__(self): - return "{}({})".format(type(self).__name__, list(self._factory())) + return "{}({})".format(type(self).__name__, list(self)) def __bool__(self): try: - next(self._factory()) + next(iter(self)) except StopIteration: return False return True @@ -119,7 +132,11 @@ class _FactoryIterableView(object): __nonzero__ = __bool__ # XXX: Python 2. def __iter__(self): - return self._factory() + iterable = ( + self._factory() if self._iterable is None else self._iterable + ) + self._iterable, current = itertools.tee(iterable) + return current class _SequenceIterableView(object): diff --git a/env/Lib/site-packages/pip/_vendor/six.py b/env/Lib/site-packages/pip/_vendor/six.py index 83f69783d1a2dcb81e613268bc77afbd517be887..4e15675d8b5caa33255fe37271700f587bd26671 100644 --- a/env/Lib/site-packages/pip/_vendor/six.py +++ b/env/Lib/site-packages/pip/_vendor/six.py @@ -29,7 +29,7 @@ import sys import types __author__ = "Benjamin Peterson <benjamin@python.org>" -__version__ = "1.15.0" +__version__ = "1.16.0" # Useful for very coarse version differentiation. 
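# Sketch (not from the patch): the _FactoryIterableView change above caches the
# factory's iterator with itertools.tee() so the view can be iterated many
# times while invoking the (possibly expensive, one-shot) factory only once;
# the sibling __len__ fix likewise swaps len(generator), which raises
# TypeError, for sum(1 for ...). The caching idiom in isolation, with invented
# class and factory names:
import itertools

class CachedIterableView:
    def __init__(self, factory):
        self._factory = factory
        self._iterable = None  # lazily captured on first iteration

    def __iter__(self):
        iterable = self._factory() if self._iterable is None else self._iterable
        # tee() yields two independent copies: keep one, hand the other out.
        self._iterable, current = itertools.tee(iterable)
        return current

calls = 0
def factory():
    global calls
    calls += 1
    yield from (1, 2, 3)

view = CachedIterableView(factory)
print(list(view), list(view))  # [1, 2, 3] [1, 2, 3]
print(calls)                   # 1 -- the factory ran only once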
@@ -71,6 +71,11 @@ else: MAXSIZE = int((1 << 63) - 1) del X +if PY34: + from importlib.util import spec_from_loader +else: + spec_from_loader = None + def _add_doc(func, doc): """Add documentation to a function.""" @@ -186,6 +191,11 @@ class _SixMetaPathImporter(object): return self return None + def find_spec(self, fullname, path, target=None): + if fullname in self.known_modules: + return spec_from_loader(fullname, self) + return None + def __get_module(self, fullname): try: return self.known_modules[fullname] @@ -223,6 +233,12 @@ class _SixMetaPathImporter(object): return None get_source = get_code # same as get_code + def create_module(self, spec): + return self.load_module(spec.name) + + def exec_module(self, module): + pass + _importer = _SixMetaPathImporter(__name__) diff --git a/env/Lib/site-packages/pip/_vendor/tenacity/__init__.py b/env/Lib/site-packages/pip/_vendor/tenacity/__init__.py index 42e9d8940b1abef581e4e0a0d673a0ed89fee858..4f1603adeb6fcf9bc1c4a16a9b6e16223c6534f3 100644 --- a/env/Lib/site-packages/pip/_vendor/tenacity/__init__.py +++ b/env/Lib/site-packages/pip/_vendor/tenacity/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016-2018 Julien Danjou # Copyright 2017 Elisey Zanko # Copyright 2016 Étienne Bersac @@ -17,29 +16,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - from inspect import iscoroutinefunction -except ImportError: - iscoroutinefunction = None - -# Replace a conditional import with a hard-coded None so that pip does -# not attempt to use tornado even if it is present in the environment. -# If tornado is non-None, tenacity will attempt to execute some code -# that is sensitive to the version of tornado, which could break pip -# if an old version is found. -tornado = None +import functools import sys import threading +import time import typing as t import warnings -from abc import ABCMeta, abstractmethod +from abc import ABC, abstractmethod from concurrent import futures - - -from pip._vendor import six - -from pip._vendor.tenacity import _utils +from inspect import iscoroutinefunction # Import all built-in retry strategies for easier usage. from .retry import retry_base # noqa @@ -48,6 +34,8 @@ from .retry import retry_always # noqa from .retry import retry_any # noqa from .retry import retry_if_exception # noqa from .retry import retry_if_exception_type # noqa +from .retry import retry_if_exception_cause_type # noqa +from .retry import retry_if_not_exception_type # noqa from .retry import retry_if_not_result # noqa from .retry import retry_if_result # noqa from .retry import retry_never # noqa @@ -77,6 +65,7 @@ from .wait import wait_none # noqa from .wait import wait_random # noqa from .wait import wait_random_exponential # noqa from .wait import wait_random_exponential as wait_full_jitter # noqa +from .wait import wait_exponential_jitter # noqa # Import all built-in before strategies for easier usage. from .before import before_log # noqa @@ -90,58 +79,23 @@ from .after import after_nothing # noqa from .before_sleep import before_sleep_log # noqa from .before_sleep import before_sleep_nothing # noqa +# Replace a conditional import with a hard-coded None so that pip does +# not attempt to use tornado even if it is present in the environment. +# If tornado is non-None, tenacity will attempt to execute some code +# that is sensitive to the version of tornado, which could break pip +# if an old version is found. 
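# Sketch (not from the patch): the six hunks above graft PEP 451 support onto a
# legacy find_module/load_module importer by deriving a ModuleSpec from the
# loader itself. The same bridge for a toy in-memory importer; the module name
# and source are invented.
import sys
import types
from importlib.util import spec_from_loader

class MemoryImporter:
    known_modules = {"demo_mod": "x = 42"}

    def find_spec(self, fullname, path, target=None):
        if fullname in self.known_modules:
            # Wrap this object (acting as its own loader) in a spec; the import
            # machinery then drives create_module()/exec_module() below.
            return spec_from_loader(fullname, self)
        return None

    def create_module(self, spec):
        return types.ModuleType(spec.name)

    def exec_module(self, module):
        exec(self.known_modules[module.__name__], module.__dict__)

sys.meta_path.insert(0, MemoryImporter())
import demo_mod
print(demo_mod.x)  # 42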
+tornado = None # type: ignore -WrappedFn = t.TypeVar("WrappedFn", bound=t.Callable) - - -@t.overload -def retry(fn): - # type: (WrappedFn) -> WrappedFn - """Type signature for @retry as a raw decorator.""" - pass - - -@t.overload -def retry(*dargs, **dkw): # noqa - # type: (...) -> t.Callable[[WrappedFn], WrappedFn] - """Type signature for the @retry() decorator constructor.""" - pass - - -def retry(*dargs, **dkw): # noqa - """Wrap a function with a new `Retrying` object. - - :param dargs: positional arguments passed to Retrying object - :param dkw: keyword arguments passed to the Retrying object - """ - # support both @retry and @retry() as valid syntax - if len(dargs) == 1 and callable(dargs[0]): - return retry()(dargs[0]) - else: +if t.TYPE_CHECKING: + import types - def wrap(f): - if isinstance(f, retry_base): - warnings.warn( - ( - "Got retry_base instance ({cls}) as callable argument, " - + "this will probably hang indefinitely (did you mean " - + "retry={cls}(...)?)" - ).format(cls=f.__class__.__name__) - ) - if iscoroutinefunction is not None and iscoroutinefunction(f): - r = AsyncRetrying(*dargs, **dkw) - elif ( - tornado - and hasattr(tornado.gen, "is_coroutine_function") - and tornado.gen.is_coroutine_function(f) - ): - r = TornadoRetrying(*dargs, **dkw) - else: - r = Retrying(*dargs, **dkw) + from .retry import RetryBaseT + from .stop import StopBaseT + from .wait import WaitBaseT - return r.wraps(f) - return wrap +WrappedFnReturnT = t.TypeVar("WrappedFnReturnT") +WrappedFn = t.TypeVar("WrappedFn", bound=t.Callable[..., t.Any]) class TryAgain(Exception): @@ -151,7 +105,7 @@ class TryAgain(Exception): NO_RESULT = object() -class DoAttempt(object): +class DoAttempt: pass @@ -159,25 +113,23 @@ class DoSleep(float): pass -class BaseAction(object): +class BaseAction: """Base class for representing actions to take by retry object. 
Concrete implementations must define: - __init__: to initialize all necessary fields - - REPR_ATTRS: class variable specifying attributes to include in repr(self) + - REPR_FIELDS: class variable specifying attributes to include in repr(self) - NAME: for identification in retry object methods and callbacks """ - REPR_FIELDS = () - NAME = None + REPR_FIELDS: t.Sequence[str] = () + NAME: t.Optional[str] = None - def __repr__(self): - state_str = ", ".join( - "%s=%r" % (field, getattr(self, field)) for field in self.REPR_FIELDS - ) - return "%s(%s)" % (type(self).__name__, state_str) + def __repr__(self) -> str: + state_str = ", ".join(f"{field}={getattr(self, field)!r}" for field in self.REPR_FIELDS) + return f"{self.__class__.__name__}({state_str})" - def __str__(self): + def __str__(self) -> str: return repr(self) @@ -185,66 +137,70 @@ class RetryAction(BaseAction): REPR_FIELDS = ("sleep",) NAME = "retry" - def __init__(self, sleep): + def __init__(self, sleep: t.SupportsFloat) -> None: self.sleep = float(sleep) _unset = object() -def _first_set(first, second): +def _first_set(first: t.Union[t.Any, object], second: t.Any) -> t.Any: return second if first is _unset else first class RetryError(Exception): """Encapsulates the last attempt instance right before giving up.""" - def __init__(self, last_attempt): + def __init__(self, last_attempt: "Future") -> None: self.last_attempt = last_attempt - super(RetryError, self).__init__(last_attempt) + super().__init__(last_attempt) - def reraise(self): + def reraise(self) -> "t.NoReturn": if self.last_attempt.failed: raise self.last_attempt.result() raise self - def __str__(self): - return "{0}[{1}]".format(self.__class__.__name__, self.last_attempt) + def __str__(self) -> str: + return f"{self.__class__.__name__}[{self.last_attempt}]" -class AttemptManager(object): +class AttemptManager: """Manage attempt context.""" - def __init__(self, retry_state): + def __init__(self, retry_state: "RetryCallState"): self.retry_state = retry_state - def __enter__(self): + def __enter__(self) -> None: pass - def __exit__(self, exc_type, exc_value, traceback): - if isinstance(exc_value, BaseException): + def __exit__( + self, + exc_type: t.Optional[t.Type[BaseException]], + exc_value: t.Optional[BaseException], + traceback: t.Optional["types.TracebackType"], + ) -> t.Optional[bool]: + if exc_type is not None and exc_value is not None: self.retry_state.set_exception((exc_type, exc_value, traceback)) return True # Swallow exception. else: # We don't have the result, actually. 
self.retry_state.set_result(None) + return None -class BaseRetrying(object): - __metaclass__ = ABCMeta - +class BaseRetrying(ABC): def __init__( self, - sleep=sleep, - stop=stop_never, - wait=wait_none(), - retry=retry_if_exception_type(), - before=before_nothing, - after=after_nothing, - before_sleep=None, - reraise=False, - retry_error_cls=RetryError, - retry_error_callback=None, + sleep: t.Callable[[t.Union[int, float]], None] = sleep, + stop: "StopBaseT" = stop_never, + wait: "WaitBaseT" = wait_none(), + retry: "RetryBaseT" = retry_if_exception_type(), + before: t.Callable[["RetryCallState"], None] = before_nothing, + after: t.Callable[["RetryCallState"], None] = after_nothing, + before_sleep: t.Optional[t.Callable[["RetryCallState"], None]] = None, + reraise: bool = False, + retry_error_cls: t.Type[RetryError] = RetryError, + retry_error_callback: t.Optional[t.Callable[["RetryCallState"], t.Any]] = None, ): self.sleep = sleep self.stop = stop @@ -258,23 +214,19 @@ class BaseRetrying(object): self.retry_error_cls = retry_error_cls self.retry_error_callback = retry_error_callback - # This attribute was moved to RetryCallState and is deprecated on - # Retrying objects but kept for backward compatibility. - self.fn = None - def copy( self, - sleep=_unset, - stop=_unset, - wait=_unset, - retry=_unset, - before=_unset, - after=_unset, - before_sleep=_unset, - reraise=_unset, - retry_error_cls=_unset, - retry_error_callback=_unset, - ): + sleep: t.Union[t.Callable[[t.Union[int, float]], None], object] = _unset, + stop: t.Union["StopBaseT", object] = _unset, + wait: t.Union["WaitBaseT", object] = _unset, + retry: t.Union[retry_base, object] = _unset, + before: t.Union[t.Callable[["RetryCallState"], None], object] = _unset, + after: t.Union[t.Callable[["RetryCallState"], None], object] = _unset, + before_sleep: t.Union[t.Optional[t.Callable[["RetryCallState"], None]], object] = _unset, + reraise: t.Union[bool, object] = _unset, + retry_error_cls: t.Union[t.Type[RetryError], object] = _unset, + retry_error_callback: t.Union[t.Optional[t.Callable[["RetryCallState"], t.Any]], object] = _unset, + ) -> "BaseRetrying": """Copy this object with some parameters changed if needed.""" return self.__class__( sleep=_first_set(sleep, self.sleep), @@ -286,24 +238,22 @@ class BaseRetrying(object): before_sleep=_first_set(before_sleep, self.before_sleep), reraise=_first_set(reraise, self.reraise), retry_error_cls=_first_set(retry_error_cls, self.retry_error_cls), - retry_error_callback=_first_set( - retry_error_callback, self.retry_error_callback - ), + retry_error_callback=_first_set(retry_error_callback, self.retry_error_callback), ) - def __repr__(self): - attrs = dict( - _utils.visible_attrs(self, attrs={"me": id(self)}), - __class__=self.__class__.__name__, - ) + def __repr__(self) -> str: return ( - "<%(__class__)s object at 0x%(me)x (stop=%(stop)s, " - "wait=%(wait)s, sleep=%(sleep)s, retry=%(retry)s, " - "before=%(before)s, after=%(after)s)>" - ) % (attrs) + f"<{self.__class__.__name__} object at 0x{id(self):x} (" + f"stop={self.stop}, " + f"wait={self.wait}, " + f"sleep={self.sleep}, " + f"retry={self.retry}, " + f"before={self.before}, " + f"after={self.after})>" + ) @property - def statistics(self): + def statistics(self) -> t.Dict[str, t.Any]: """Return a dictionary of runtime statistics. This dictionary will be empty when the controller has never been @@ -325,63 +275,60 @@ class BaseRetrying(object): statistics from each thread). 
""" try: - return self._local.statistics + return self._local.statistics # type: ignore[no-any-return] except AttributeError: - self._local.statistics = {} + self._local.statistics = t.cast(t.Dict[str, t.Any], {}) return self._local.statistics - def wraps(self, f): + def wraps(self, f: WrappedFn) -> WrappedFn: """Wrap a function for retrying. :param f: A function to wraps for retrying. """ - @_utils.wraps(f) - def wrapped_f(*args, **kw): + @functools.wraps(f) + def wrapped_f(*args: t.Any, **kw: t.Any) -> t.Any: return self(f, *args, **kw) - def retry_with(*args, **kwargs): + def retry_with(*args: t.Any, **kwargs: t.Any) -> WrappedFn: return self.copy(*args, **kwargs).wraps(f) - wrapped_f.retry = self - wrapped_f.retry_with = retry_with + wrapped_f.retry = self # type: ignore[attr-defined] + wrapped_f.retry_with = retry_with # type: ignore[attr-defined] - return wrapped_f + return wrapped_f # type: ignore[return-value] - def begin(self, fn): + def begin(self) -> None: self.statistics.clear() - self.statistics["start_time"] = _utils.now() + self.statistics["start_time"] = time.monotonic() self.statistics["attempt_number"] = 1 self.statistics["idle_for"] = 0 - self.fn = fn - def iter(self, retry_state): # noqa + def iter(self, retry_state: "RetryCallState") -> t.Union[DoAttempt, DoSleep, t.Any]: # noqa fut = retry_state.outcome if fut is None: if self.before is not None: self.before(retry_state) return DoAttempt() - is_explicit_retry = retry_state.outcome.failed and isinstance( - retry_state.outcome.exception(), TryAgain - ) - if not (is_explicit_retry or self.retry(retry_state=retry_state)): + is_explicit_retry = fut.failed and isinstance(fut.exception(), TryAgain) + if not (is_explicit_retry or self.retry(retry_state)): return fut.result() if self.after is not None: - self.after(retry_state=retry_state) + self.after(retry_state) self.statistics["delay_since_first_attempt"] = retry_state.seconds_since_start - if self.stop(retry_state=retry_state): + if self.stop(retry_state): if self.retry_error_callback: - return self.retry_error_callback(retry_state=retry_state) + return self.retry_error_callback(retry_state) retry_exc = self.retry_error_cls(fut) if self.reraise: raise retry_exc.reraise() - six.raise_from(retry_exc, fut.exception()) + raise retry_exc from fut.exception() if self.wait: - sleep = self.wait(retry_state=retry_state) + sleep = self.wait(retry_state) else: sleep = 0.0 retry_state.next_action = RetryAction(sleep) @@ -390,12 +337,12 @@ class BaseRetrying(object): self.statistics["attempt_number"] += 1 if self.before_sleep is not None: - self.before_sleep(retry_state=retry_state) + self.before_sleep(retry_state) return DoSleep(sleep) - def __iter__(self): - self.begin(None) + def __iter__(self) -> t.Generator[AttemptManager, None, None]: + self.begin() retry_state = RetryCallState(self, fn=None, args=(), kwargs={}) while True: @@ -409,23 +356,25 @@ class BaseRetrying(object): break @abstractmethod - def __call__(self, *args, **kwargs): + def __call__( + self, + fn: t.Callable[..., WrappedFnReturnT], + *args: t.Any, + **kwargs: t.Any, + ) -> WrappedFnReturnT: pass - def call(self, *args, **kwargs): - """Use ``__call__`` instead because this method is deprecated.""" - warnings.warn( - "'call()' method is deprecated. 
" + "Use '__call__()' instead", - DeprecationWarning, - ) - return self.__call__(*args, **kwargs) - class Retrying(BaseRetrying): """Retrying controller.""" - def __call__(self, fn, *args, **kwargs): - self.begin(fn) + def __call__( + self, + fn: t.Callable[..., WrappedFnReturnT], + *args: t.Any, + **kwargs: t.Any, + ) -> WrappedFnReturnT: + self.begin() retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs) while True: @@ -434,30 +383,36 @@ class Retrying(BaseRetrying): try: result = fn(*args, **kwargs) except BaseException: # noqa: B902 - retry_state.set_exception(sys.exc_info()) + retry_state.set_exception(sys.exc_info()) # type: ignore[arg-type] else: retry_state.set_result(result) elif isinstance(do, DoSleep): retry_state.prepare_for_next_attempt() self.sleep(do) else: - return do + return do # type: ignore[no-any-return] -class Future(futures.Future): +if sys.version_info[1] >= 9: + FutureGenericT = futures.Future[t.Any] +else: + FutureGenericT = futures.Future + + +class Future(FutureGenericT): """Encapsulates a (future or past) attempted call to a target function.""" - def __init__(self, attempt_number): - super(Future, self).__init__() + def __init__(self, attempt_number: int) -> None: + super().__init__() self.attempt_number = attempt_number @property - def failed(self): + def failed(self) -> bool: """Return whether a exception is being held in this future.""" return self.exception() is not None @classmethod - def construct(cls, attempt_number, value, has_exception): + def construct(cls, attempt_number: int, value: t.Any, has_exception: bool) -> "Future": """Construct a new Future object.""" fut = cls(attempt_number) if has_exception: @@ -467,12 +422,18 @@ class Future(futures.Future): return fut -class RetryCallState(object): +class RetryCallState: """State related to a single call wrapped with Retrying.""" - def __init__(self, retry_object, fn, args, kwargs): + def __init__( + self, + retry_object: BaseRetrying, + fn: t.Optional[WrappedFn], + args: t.Any, + kwargs: t.Any, + ) -> None: #: Retry call start timestamp - self.start_time = _utils.now() + self.start_time = time.monotonic() #: Retry manager object self.retry_object = retry_object #: Function wrapped by this retry call @@ -483,43 +444,165 @@ class RetryCallState(object): self.kwargs = kwargs #: The number of the current attempt - self.attempt_number = 1 + self.attempt_number: int = 1 #: Last outcome (result or exception) produced by the function - self.outcome = None + self.outcome: t.Optional[Future] = None #: Timestamp of the last outcome - self.outcome_timestamp = None + self.outcome_timestamp: t.Optional[float] = None #: Time spent sleeping in retries - self.idle_for = 0 + self.idle_for: float = 0.0 #: Next action as decided by the retry manager - self.next_action = None + self.next_action: t.Optional[RetryAction] = None @property - def seconds_since_start(self): + def seconds_since_start(self) -> t.Optional[float]: if self.outcome_timestamp is None: return None return self.outcome_timestamp - self.start_time - def prepare_for_next_attempt(self): + def prepare_for_next_attempt(self) -> None: self.outcome = None self.outcome_timestamp = None self.attempt_number += 1 self.next_action = None - def set_result(self, val): - ts = _utils.now() + def set_result(self, val: t.Any) -> None: + ts = time.monotonic() fut = Future(self.attempt_number) fut.set_result(val) self.outcome, self.outcome_timestamp = fut, ts - def set_exception(self, exc_info): - ts = _utils.now() + def set_exception( + self, 
exc_info: t.Tuple[t.Type[BaseException], BaseException, "types.TracebackType| None"] + ) -> None: + ts = time.monotonic() fut = Future(self.attempt_number) - _utils.capture(fut, exc_info) + fut.set_exception(exc_info[1]) self.outcome, self.outcome_timestamp = fut, ts + def __repr__(self) -> str: + if self.outcome is None: + result = "none yet" + elif self.outcome.failed: + exception = self.outcome.exception() + result = f"failed ({exception.__class__.__name__} {exception})" + else: + result = f"returned {self.outcome.result()}" + + slept = float(round(self.idle_for, 2)) + clsname = self.__class__.__name__ + return f"<{clsname} {id(self)}: attempt #{self.attempt_number}; slept for {slept}; last result: {result}>" + + +@t.overload +def retry(func: WrappedFn) -> WrappedFn: + ... + + +@t.overload +def retry( + sleep: t.Callable[[t.Union[int, float]], None] = sleep, + stop: "StopBaseT" = stop_never, + wait: "WaitBaseT" = wait_none(), + retry: "RetryBaseT" = retry_if_exception_type(), + before: t.Callable[["RetryCallState"], None] = before_nothing, + after: t.Callable[["RetryCallState"], None] = after_nothing, + before_sleep: t.Optional[t.Callable[["RetryCallState"], None]] = None, + reraise: bool = False, + retry_error_cls: t.Type["RetryError"] = RetryError, + retry_error_callback: t.Optional[t.Callable[["RetryCallState"], t.Any]] = None, +) -> t.Callable[[WrappedFn], WrappedFn]: + ... + + +def retry(*dargs: t.Any, **dkw: t.Any) -> t.Any: + """Wrap a function with a new `Retrying` object. + + :param dargs: positional arguments passed to Retrying object + :param dkw: keyword arguments passed to the Retrying object + """ + # support both @retry and @retry() as valid syntax + if len(dargs) == 1 and callable(dargs[0]): + return retry()(dargs[0]) + else: + + def wrap(f: WrappedFn) -> WrappedFn: + if isinstance(f, retry_base): + warnings.warn( + f"Got retry_base instance ({f.__class__.__name__}) as callable argument, " + f"this will probably hang indefinitely (did you mean retry={f.__class__.__name__}(...)?)" + ) + r: "BaseRetrying" + if iscoroutinefunction(f): + r = AsyncRetrying(*dargs, **dkw) + elif tornado and hasattr(tornado.gen, "is_coroutine_function") and tornado.gen.is_coroutine_function(f): + r = TornadoRetrying(*dargs, **dkw) + else: + r = Retrying(*dargs, **dkw) + + return r.wraps(f) + + return wrap -if iscoroutinefunction: - from pip._vendor.tenacity._asyncio import AsyncRetrying + +from pip._vendor.tenacity._asyncio import AsyncRetrying # noqa:E402,I100 if tornado: from pip._vendor.tenacity.tornadoweb import TornadoRetrying + + +__all__ = [ + "retry_base", + "retry_all", + "retry_always", + "retry_any", + "retry_if_exception", + "retry_if_exception_type", + "retry_if_exception_cause_type", + "retry_if_not_exception_type", + "retry_if_not_result", + "retry_if_result", + "retry_never", + "retry_unless_exception_type", + "retry_if_exception_message", + "retry_if_not_exception_message", + "sleep", + "sleep_using_event", + "stop_after_attempt", + "stop_after_delay", + "stop_all", + "stop_any", + "stop_never", + "stop_when_event_set", + "wait_chain", + "wait_combine", + "wait_exponential", + "wait_fixed", + "wait_incrementing", + "wait_none", + "wait_random", + "wait_random_exponential", + "wait_full_jitter", + "wait_exponential_jitter", + "before_log", + "before_nothing", + "after_log", + "after_nothing", + "before_sleep_log", + "before_sleep_nothing", + "retry", + "WrappedFn", + "TryAgain", + "NO_RESULT", + "DoAttempt", + "DoSleep", + "BaseAction", + "RetryAction", + "RetryError", + 
"AttemptManager", + "BaseRetrying", + "Retrying", + "Future", + "RetryCallState", + "AsyncRetrying", +] diff --git a/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 001b1a7fa9d35d4dfdb92f3bff67d39437251949..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/_asyncio.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/_asyncio.cpython-39.pyc deleted file mode 100644 index 3b91e36b9860330261afab3457cf18cdc2344855..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/_asyncio.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/_utils.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/_utils.cpython-39.pyc deleted file mode 100644 index acda5734ea771b4394eab166b0e3ac803689901c..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/_utils.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/after.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/after.cpython-39.pyc deleted file mode 100644 index fd5a3325512a761d42ef133ff5c6b4eff191f6ab..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/after.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/before.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/before.cpython-39.pyc deleted file mode 100644 index 352c12dc204df5f3c7ee733d55beec0dd720ed4c..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/before.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/before_sleep.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/before_sleep.cpython-39.pyc deleted file mode 100644 index 19336c81dafca811eed88bb1a85887e1c153b53e..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/before_sleep.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/compat.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/compat.cpython-39.pyc deleted file mode 100644 index 5dcd8e471a97ace9c209bbe19553aae881787f13..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/compat.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/nap.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/nap.cpython-39.pyc deleted file mode 100644 index ada140b69cc271300fd45807a99f8bcaf2574f68..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/nap.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/retry.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/retry.cpython-39.pyc deleted file mode 100644 index 
65a3393e79869d742a0b48b593a396668bf2ca64..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/retry.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/stop.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/stop.cpython-39.pyc deleted file mode 100644 index 2db4c1e1427b4a1058f95b522991b81cbb12178a..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/stop.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/tornadoweb.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/tornadoweb.cpython-39.pyc deleted file mode 100644 index 69b973f11458b55b226a947be93ecf31d66d6a93..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/tornadoweb.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/wait.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/wait.cpython-39.pyc deleted file mode 100644 index 2beda98fff5b3a24f2af5567aed1c555d1a9d452..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/tenacity/__pycache__/wait.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/tenacity/_asyncio.py b/env/Lib/site-packages/pip/_vendor/tenacity/_asyncio.py index d9a2d4634fe7d7215dd64abb959976411802add1..2e50cd7b40ef18e7f7ee56c0f528bf0ef88b167a 100644 --- a/env/Lib/site-packages/pip/_vendor/tenacity/_asyncio.py +++ b/env/Lib/site-packages/pip/_vendor/tenacity/_asyncio.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 Étienne Bersac # Copyright 2016 Julien Danjou # Copyright 2016 Joshua Harlow @@ -15,7 +14,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
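# A minimal usage sketch for the retry decorator rebuilt in the tenacity
# __init__.py diff above, assuming the vendored import path (outside pip this
# would be plain "tenacity"): the bare @retry form uses all defaults
# (stop_never, so it retries indefinitely), the parameterized form passes its
# arguments through to Retrying, and coroutine functions are detected via
# iscoroutinefunction and routed to AsyncRetrying. The flaky_* callables are
# hypothetical stand-ins, not part of this diff.

from pip._vendor.tenacity import retry, stop_after_attempt, wait_fixed


@retry  # bare form: equivalent to @retry() with all defaults
def flaky_ping() -> None:
    raise IOError("transient failure")


@retry(stop=stop_after_attempt(3), wait=wait_fixed(0.5))
def flaky_read() -> str:
    # Retried up to 3 times with a fixed 0.5 s sleep between attempts.
    raise IOError("transient failure")


@retry(stop=stop_after_attempt(3), wait=wait_fixed(0.5))
async def flaky_fetch() -> str:
    # iscoroutinefunction(flaky_fetch) is true, so retry() builds an
    # AsyncRetrying wrapper and the inter-attempt sleep is awaited.
    raise IOError("transient failure")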
+ +import functools import sys +import typing as t from asyncio import sleep from pip._vendor.tenacity import AttemptManager @@ -24,14 +26,21 @@ from pip._vendor.tenacity import DoAttempt from pip._vendor.tenacity import DoSleep from pip._vendor.tenacity import RetryCallState +WrappedFnReturnT = t.TypeVar("WrappedFnReturnT") +WrappedFn = t.TypeVar("WrappedFn", bound=t.Callable[..., t.Awaitable[t.Any]]) + class AsyncRetrying(BaseRetrying): - def __init__(self, sleep=sleep, **kwargs): - super(AsyncRetrying, self).__init__(**kwargs) + sleep: t.Callable[[float], t.Awaitable[t.Any]] + + def __init__(self, sleep: t.Callable[[float], t.Awaitable[t.Any]] = sleep, **kwargs: t.Any) -> None: + super().__init__(**kwargs) self.sleep = sleep - async def __call__(self, fn, *args, **kwargs): - self.begin(fn) + async def __call__( # type: ignore[override] + self, fn: WrappedFn, *args: t.Any, **kwargs: t.Any + ) -> WrappedFnReturnT: + self.begin() retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs) while True: @@ -40,21 +49,24 @@ class AsyncRetrying(BaseRetrying): try: result = await fn(*args, **kwargs) except BaseException: # noqa: B902 - retry_state.set_exception(sys.exc_info()) + retry_state.set_exception(sys.exc_info()) # type: ignore[arg-type] else: retry_state.set_result(result) elif isinstance(do, DoSleep): retry_state.prepare_for_next_attempt() await self.sleep(do) else: - return do + return do # type: ignore[no-any-return] - def __aiter__(self): - self.begin(None) + def __iter__(self) -> t.Generator[AttemptManager, None, None]: + raise TypeError("AsyncRetrying object is not iterable") + + def __aiter__(self) -> "AsyncRetrying": + self.begin() self._retry_state = RetryCallState(self, fn=None, args=(), kwargs={}) return self - async def __anext__(self): + async def __anext__(self) -> AttemptManager: while True: do = self.iter(retry_state=self._retry_state) if do is None: @@ -65,17 +77,18 @@ class AsyncRetrying(BaseRetrying): self._retry_state.prepare_for_next_attempt() await self.sleep(do) else: - return do + raise StopAsyncIteration - def wraps(self, fn): + def wraps(self, fn: WrappedFn) -> WrappedFn: fn = super().wraps(fn) # Ensure wrapper is recognized as a coroutine function. - async def async_wrapped(*args, **kwargs): + @functools.wraps(fn) + async def async_wrapped(*args: t.Any, **kwargs: t.Any) -> t.Any: return await fn(*args, **kwargs) # Preserve attributes - async_wrapped.retry = fn.retry - async_wrapped.retry_with = fn.retry_with + async_wrapped.retry = fn.retry # type: ignore[attr-defined] + async_wrapped.retry_with = fn.retry_with # type: ignore[attr-defined] - return async_wrapped + return async_wrapped # type: ignore[return-value] diff --git a/env/Lib/site-packages/pip/_vendor/tenacity/_utils.py b/env/Lib/site-packages/pip/_vendor/tenacity/_utils.py index 8c0ca788c4db37e3ccb44026574530a1e074015a..f14ff32096eab20a8cc1a5d3c2b8c8cfe1fcb2d2 100644 --- a/env/Lib/site-packages/pip/_vendor/tenacity/_utils.py +++ b/env/Lib/site-packages/pip/_vendor/tenacity/_utils.py @@ -14,73 +14,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -import inspect import sys -import time -from functools import update_wrapper +import typing +from datetime import timedelta -from pip._vendor import six -# sys.maxint / 2, since Python 3.2 doesn't have a sys.maxint... 
-try: - MAX_WAIT = sys.maxint / 2 -except AttributeError: - MAX_WAIT = 1073741823 +# sys.maxsize: +# An integer giving the maximum value a variable of type Py_ssize_t can take. +MAX_WAIT = sys.maxsize / 2 -if six.PY2: - from functools import WRAPPER_ASSIGNMENTS, WRAPPER_UPDATES - - def wraps(fn): - """Do the same as six.wraps but only copy attributes that exist. - - For example, object instances don't have __name__ attribute, so - six.wraps fails. This is fixed in Python 3 - (https://bugs.python.org/issue3445), but didn't get backported to six. - - Also, see https://github.com/benjaminp/six/issues/250. - """ - - def filter_hasattr(obj, attrs): - return tuple(a for a in attrs if hasattr(obj, a)) - - return six.wraps( - fn, - assigned=filter_hasattr(fn, WRAPPER_ASSIGNMENTS), - updated=filter_hasattr(fn, WRAPPER_UPDATES), - ) - - def capture(fut, tb): - # TODO(harlowja): delete this in future, since its - # has to repeatedly calculate this crap. - fut.set_exception_info(tb[1], tb[2]) - - def getargspec(func): - # This was deprecated in Python 3. - return inspect.getargspec(func) - - -else: - from functools import wraps # noqa - - def capture(fut, tb): - fut.set_exception(tb[1]) - - def getargspec(func): - return inspect.getfullargspec(func) - - -def visible_attrs(obj, attrs=None): - if attrs is None: - attrs = {} - for attr_name, attr in inspect.getmembers(obj): - if attr_name.startswith("_"): - continue - attrs[attr_name] = attr - return attrs - - -def find_ordinal(pos_num): +def find_ordinal(pos_num: int) -> str: # See: https://en.wikipedia.org/wiki/English_numerals#Ordinal_numbers if pos_num == 0: return "th" @@ -90,17 +34,17 @@ def find_ordinal(pos_num): return "nd" elif pos_num == 3: return "rd" - elif pos_num >= 4 and pos_num <= 20: + elif 4 <= pos_num <= 20: return "th" else: return find_ordinal(pos_num % 10) -def to_ordinal(pos_num): - return "%i%s" % (pos_num, find_ordinal(pos_num)) +def to_ordinal(pos_num: int) -> str: + return f"{pos_num}{find_ordinal(pos_num)}" -def get_callback_name(cb): +def get_callback_name(cb: typing.Callable[..., typing.Any]) -> str: """Get a callback fully-qualified name. If no name can be produced ``repr(cb)`` is called and returned. @@ -111,14 +55,6 @@ def get_callback_name(cb): except AttributeError: try: segments.append(cb.__name__) - if inspect.ismethod(cb): - try: - # This attribute doesn't exist on py3.x or newer, so - # we optionally ignore it... (on those versions of - # python `__qualname__` should have been found anyway). - segments.insert(0, cb.im_class.__name__) - except AttributeError: - pass except AttributeError: pass if not segments: @@ -133,27 +69,8 @@ def get_callback_name(cb): return ".".join(segments) -try: - now = time.monotonic # noqa -except AttributeError: - from monotonic import monotonic as now # noqa - - -class cached_property(object): - """A property that is computed once per instance. - - Upon being computed it replaces itself with an ordinary attribute. Deleting - the attribute resets the property. 
- - Source: https://github.com/bottlepy/bottle/blob/1de24157e74a6971d136550afe1b63eec5b0df2b/bottle.py#L234-L246 - """ # noqa: E501 +time_unit_type = typing.Union[int, float, timedelta] - def __init__(self, func): - update_wrapper(self, func) - self.func = func - def __get__(self, obj, cls): - if obj is None: - return self - value = obj.__dict__[self.func.__name__] = self.func(obj) - return value +def to_seconds(time_unit: time_unit_type) -> float: + return float(time_unit.total_seconds() if isinstance(time_unit, timedelta) else time_unit) diff --git a/env/Lib/site-packages/pip/_vendor/tenacity/after.py b/env/Lib/site-packages/pip/_vendor/tenacity/after.py index c04e7c18325c35e8be86f92c5e00748f55ed6a69..574c9bcea6e222ea8283a3c8dafbda15a2893fe1 100644 --- a/env/Lib/site-packages/pip/_vendor/tenacity/after.py +++ b/env/Lib/site-packages/pip/_vendor/tenacity/after.py @@ -14,27 +14,38 @@ # See the License for the specific language governing permissions and # limitations under the License. +import typing + from pip._vendor.tenacity import _utils +if typing.TYPE_CHECKING: + import logging + + from pip._vendor.tenacity import RetryCallState + -def after_nothing(retry_state): +def after_nothing(retry_state: "RetryCallState") -> None: """After call strategy that does nothing.""" -def after_log(logger, log_level, sec_format="%0.3f"): +def after_log( + logger: "logging.Logger", + log_level: int, + sec_format: str = "%0.3f", +) -> typing.Callable[["RetryCallState"], None]: """After call strategy that logs to some logger the finished attempt.""" - log_tpl = ( - "Finished call to '%s' after " + str(sec_format) + "(s), " - "this was the %s time calling it." - ) - def log_it(retry_state): + def log_it(retry_state: "RetryCallState") -> None: + if retry_state.fn is None: + # NOTE(sileht): can't really happen, but we must please mypy + fn_name = "<unknown>" + else: + fn_name = _utils.get_callback_name(retry_state.fn) logger.log( log_level, - log_tpl, - _utils.get_callback_name(retry_state.fn), - retry_state.seconds_since_start, - _utils.to_ordinal(retry_state.attempt_number), + f"Finished call to '{fn_name}' " + f"after {sec_format % retry_state.seconds_since_start}(s), " + f"this was the {_utils.to_ordinal(retry_state.attempt_number)} time calling it.", ) return log_it diff --git a/env/Lib/site-packages/pip/_vendor/tenacity/before.py b/env/Lib/site-packages/pip/_vendor/tenacity/before.py index 3229517d5f81597852340966645ba49556778fb3..cfd7dc72ee7fe9300948133cfeb660f610b90e4e 100644 --- a/env/Lib/site-packages/pip/_vendor/tenacity/before.py +++ b/env/Lib/site-packages/pip/_vendor/tenacity/before.py @@ -14,22 +14,33 @@ # See the License for the specific language governing permissions and # limitations under the License. 
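# A short sketch of how the typed logging hooks above (after_log here,
# before_log just below) attach to a retry, assuming the vendored import
# path; each factory returns a callable taking a RetryCallState. fetch_page
# and its logger wiring are hypothetical, not part of this diff.

import logging

from pip._vendor.tenacity import after_log, before_log, retry, stop_after_attempt

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)


@retry(
    stop=stop_after_attempt(3),
    # Emits "Starting call to '...', this is the 1st time calling it."
    before=before_log(logger, logging.DEBUG),
    # Emits "Finished call to '...' after 0.000(s), this was the 1st time calling it."
    after=after_log(logger, logging.DEBUG),
)
def fetch_page() -> str:
    raise IOError("transient failure")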
+import typing + from pip._vendor.tenacity import _utils +if typing.TYPE_CHECKING: + import logging + + from pip._vendor.tenacity import RetryCallState + -def before_nothing(retry_state): +def before_nothing(retry_state: "RetryCallState") -> None: """Before call strategy that does nothing.""" -def before_log(logger, log_level): +def before_log(logger: "logging.Logger", log_level: int) -> typing.Callable[["RetryCallState"], None]: """Before call strategy that logs to some logger the attempt.""" - def log_it(retry_state): + def log_it(retry_state: "RetryCallState") -> None: + if retry_state.fn is None: + # NOTE(sileht): can't really happen, but we must please mypy + fn_name = "<unknown>" + else: + fn_name = _utils.get_callback_name(retry_state.fn) logger.log( log_level, - "Starting call to '%s', this is the %s time calling it.", - _utils.get_callback_name(retry_state.fn), - _utils.to_ordinal(retry_state.attempt_number), + f"Starting call to '{fn_name}', " + f"this is the {_utils.to_ordinal(retry_state.attempt_number)} time calling it.", ) return log_it diff --git a/env/Lib/site-packages/pip/_vendor/tenacity/before_sleep.py b/env/Lib/site-packages/pip/_vendor/tenacity/before_sleep.py index a051acacb2ed5030d8b8e48249d2596f2abe3652..8c6167fb3a6b390c5c0a3ba455f76cedf34695f2 100644 --- a/env/Lib/site-packages/pip/_vendor/tenacity/before_sleep.py +++ b/env/Lib/site-packages/pip/_vendor/tenacity/before_sleep.py @@ -14,37 +14,57 @@ # See the License for the specific language governing permissions and # limitations under the License. +import typing + from pip._vendor.tenacity import _utils -from pip._vendor.tenacity.compat import get_exc_info_from_future +if typing.TYPE_CHECKING: + import logging + + from pip._vendor.tenacity import RetryCallState -def before_sleep_nothing(retry_state): + +def before_sleep_nothing(retry_state: "RetryCallState") -> None: """Before call strategy that does nothing.""" -def before_sleep_log(logger, log_level, exc_info=False): +def before_sleep_log( + logger: "logging.Logger", + log_level: int, + exc_info: bool = False, +) -> typing.Callable[["RetryCallState"], None]: """Before call strategy that logs to some logger the attempt.""" - def log_it(retry_state): + def log_it(retry_state: "RetryCallState") -> None: + local_exc_info: BaseException | bool | None + + if retry_state.outcome is None: + raise RuntimeError("log_it() called before outcome was set") + + if retry_state.next_action is None: + raise RuntimeError("log_it() called before next_action was set") + if retry_state.outcome.failed: ex = retry_state.outcome.exception() - verb, value = "raised", "%s: %s" % (type(ex).__name__, ex) + verb, value = "raised", f"{ex.__class__.__name__}: {ex}" if exc_info: - local_exc_info = get_exc_info_from_future(retry_state.outcome) + local_exc_info = retry_state.outcome.exception() else: local_exc_info = False else: verb, value = "returned", retry_state.outcome.result() local_exc_info = False # exc_info does not apply when no exception + if retry_state.fn is None: + # NOTE(sileht): can't really happen, but we must please mypy + fn_name = "<unknown>" + else: + fn_name = _utils.get_callback_name(retry_state.fn) + logger.log( log_level, - "Retrying %s in %s seconds as it %s %s.", - _utils.get_callback_name(retry_state.fn), - getattr(retry_state.next_action, "sleep"), - verb, - value, + f"Retrying {fn_name} " f"in {retry_state.next_action.sleep} seconds as it {verb} {value}.", exc_info=local_exc_info, ) diff --git a/env/Lib/site-packages/pip/_vendor/tenacity/compat.py 
b/env/Lib/site-packages/pip/_vendor/tenacity/compat.py deleted file mode 100644 index ce4796b133046da989ce4f35eaa7b391da8eb1dd..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/tenacity/compat.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Utilities for providing backward compatibility.""" -from pip._vendor import six - - -def get_exc_info_from_future(future): - """ - Get an exc_info value from a Future. - - Given a a Future instance, retrieve an exc_info value suitable for passing - in as the exc_info parameter to logging.Logger.log() and related methods. - - On Python 2, this will be a (type, value, traceback) triple. - On Python 3, this will be an exception instance (with embedded traceback). - - If there was no exception, None is returned on both versions of Python. - """ - if six.PY3: - return future.exception() - else: - ex, tb = future.exception_info() - if ex is None: - return None - return type(ex), ex, tb diff --git a/env/Lib/site-packages/pip/_vendor/tenacity/nap.py b/env/Lib/site-packages/pip/_vendor/tenacity/nap.py index 83ff839c361e5ffa5b5e9223858003d596c93f1e..72aa5bfd4b60d8e6ef6ed0cf2ae4f763d12195cc 100644 --- a/env/Lib/site-packages/pip/_vendor/tenacity/nap.py +++ b/env/Lib/site-packages/pip/_vendor/tenacity/nap.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 Étienne Bersac # Copyright 2016 Julien Danjou # Copyright 2016 Joshua Harlow @@ -17,9 +16,13 @@ # limitations under the License. import time +import typing +if typing.TYPE_CHECKING: + import threading -def sleep(seconds): + +def sleep(seconds: float) -> None: """ Sleep strategy that delays execution for a given number of seconds. @@ -28,13 +31,13 @@ def sleep(seconds): time.sleep(seconds) -class sleep_using_event(object): +class sleep_using_event: """Sleep strategy that waits on an event to be set.""" - def __init__(self, event): + def __init__(self, event: "threading.Event") -> None: self.event = event - def __call__(self, timeout): + def __call__(self, timeout: typing.Optional[float]) -> None: # NOTE(harlowja): this may *not* actually wait for timeout # seconds if the event is set (ie this may eject out early). 
self.event.wait(timeout=timeout) diff --git a/env/Lib/site-packages/pip/_vendor/tenacity/retry.py b/env/Lib/site-packages/pip/_vendor/tenacity/retry.py index ddaf8e7fd73fc96acf611d7bc7c874300a353324..38988739d6406aeb5e3be903c0ea6fb82752f328 100644 --- a/env/Lib/site-packages/pip/_vendor/tenacity/retry.py +++ b/env/Lib/site-packages/pip/_vendor/tenacity/retry.py @@ -1,5 +1,3 @@ -# -*- encoding: utf-8 -*- -# # Copyright 2016–2021 Julien Danjou # Copyright 2016 Joshua Harlow # Copyright 2013-2014 Ray Holder @@ -18,29 +16,33 @@ import abc import re +import typing -from pip._vendor import six +if typing.TYPE_CHECKING: + from pip._vendor.tenacity import RetryCallState -@six.add_metaclass(abc.ABCMeta) -class retry_base(object): +class retry_base(abc.ABC): """Abstract base class for retry strategies.""" @abc.abstractmethod - def __call__(self, retry_state): + def __call__(self, retry_state: "RetryCallState") -> bool: pass - def __and__(self, other): + def __and__(self, other: "retry_base") -> "retry_all": return retry_all(self, other) - def __or__(self, other): + def __or__(self, other: "retry_base") -> "retry_any": return retry_any(self, other) +RetryBaseT = typing.Union[retry_base, typing.Callable[["RetryCallState"], bool]] + + class _retry_never(retry_base): """Retry strategy that never rejects any result.""" - def __call__(self, retry_state): + def __call__(self, retry_state: "RetryCallState") -> bool: return False @@ -50,7 +52,7 @@ retry_never = _retry_never() class _retry_always(retry_base): """Retry strategy that always rejects any result.""" - def __call__(self, retry_state): + def __call__(self, retry_state: "RetryCallState") -> bool: return True @@ -60,12 +62,18 @@ retry_always = _retry_always() class retry_if_exception(retry_base): """Retry strategy that retries if an exception verifies a predicate.""" - def __init__(self, predicate): + def __init__(self, predicate: typing.Callable[[BaseException], bool]) -> None: self.predicate = predicate - def __call__(self, retry_state): + def __call__(self, retry_state: "RetryCallState") -> bool: + if retry_state.outcome is None: + raise RuntimeError("__call__() called before outcome was set") + if retry_state.outcome.failed: - return self.predicate(retry_state.outcome.exception()) + exception = retry_state.outcome.exception() + if exception is None: + raise RuntimeError("outcome failed but the exception is None") + return self.predicate(exception) else: return False @@ -73,36 +81,98 @@ class retry_if_exception(retry_base): class retry_if_exception_type(retry_if_exception): """Retries if an exception has been raised of one or more types.""" - def __init__(self, exception_types=Exception): + def __init__( + self, + exception_types: typing.Union[ + typing.Type[BaseException], + typing.Tuple[typing.Type[BaseException], ...], + ] = Exception, + ) -> None: + self.exception_types = exception_types + super().__init__(lambda e: isinstance(e, exception_types)) + + +class retry_if_not_exception_type(retry_if_exception): + """Retries except an exception has been raised of one or more types.""" + + def __init__( + self, + exception_types: typing.Union[ + typing.Type[BaseException], + typing.Tuple[typing.Type[BaseException], ...], + ] = Exception, + ) -> None: self.exception_types = exception_types - super(retry_if_exception_type, self).__init__( - lambda e: isinstance(e, exception_types) - ) + super().__init__(lambda e: not isinstance(e, exception_types)) class retry_unless_exception_type(retry_if_exception): """Retries until an exception is raised of one or 
more types.""" - def __init__(self, exception_types=Exception): + def __init__( + self, + exception_types: typing.Union[ + typing.Type[BaseException], + typing.Tuple[typing.Type[BaseException], ...], + ] = Exception, + ) -> None: self.exception_types = exception_types - super(retry_unless_exception_type, self).__init__( - lambda e: not isinstance(e, exception_types) - ) + super().__init__(lambda e: not isinstance(e, exception_types)) + + def __call__(self, retry_state: "RetryCallState") -> bool: + if retry_state.outcome is None: + raise RuntimeError("__call__() called before outcome was set") - def __call__(self, retry_state): # always retry if no exception was raised if not retry_state.outcome.failed: return True - return self.predicate(retry_state.outcome.exception()) + + exception = retry_state.outcome.exception() + if exception is None: + raise RuntimeError("outcome failed but the exception is None") + return self.predicate(exception) + + +class retry_if_exception_cause_type(retry_base): + """Retries if any of the causes of the raised exception is of one or more types. + + The check on the type of the cause of the exception is done recursively (until finding + an exception in the chain that has no `__cause__`) + """ + + def __init__( + self, + exception_types: typing.Union[ + typing.Type[BaseException], + typing.Tuple[typing.Type[BaseException], ...], + ] = Exception, + ) -> None: + self.exception_cause_types = exception_types + + def __call__(self, retry_state: "RetryCallState") -> bool: + if retry_state.outcome is None: + raise RuntimeError("__call__ called before outcome was set") + + if retry_state.outcome.failed: + exc = retry_state.outcome.exception() + while exc is not None: + if isinstance(exc.__cause__, self.exception_cause_types): + return True + exc = exc.__cause__ + + return False class retry_if_result(retry_base): """Retries if the result verifies a predicate.""" - def __init__(self, predicate): + def __init__(self, predicate: typing.Callable[[typing.Any], bool]) -> None: self.predicate = predicate - def __call__(self, retry_state): + def __call__(self, retry_state: "RetryCallState") -> bool: + if retry_state.outcome is None: + raise RuntimeError("__call__() called before outcome was set") + if not retry_state.outcome.failed: return self.predicate(retry_state.outcome.result()) else: @@ -112,10 +182,13 @@ class retry_if_result(retry_base): class retry_if_not_result(retry_base): """Retries if the result refutes a predicate.""" - def __init__(self, predicate): + def __init__(self, predicate: typing.Callable[[typing.Any], bool]) -> None: self.predicate = predicate - def __call__(self, retry_state): + def __call__(self, retry_state: "RetryCallState") -> bool: + if retry_state.outcome is None: + raise RuntimeError("__call__() called before outcome was set") + if not retry_state.outcome.failed: return not self.predicate(retry_state.outcome.result()) else: @@ -125,68 +198,75 @@ class retry_if_not_result(retry_base): class retry_if_exception_message(retry_if_exception): """Retries if an exception message equals or matches.""" - def __init__(self, message=None, match=None): + def __init__( + self, + message: typing.Optional[str] = None, + match: typing.Optional[str] = None, + ) -> None: if message and match: - raise TypeError( - "{}() takes either 'message' or 'match', not both".format( - self.__class__.__name__ - ) - ) + raise TypeError(f"{self.__class__.__name__}() takes either 'message' or 'match', not both") # set predicate if message: - def message_fnc(exception): + def 
message_fnc(exception: BaseException) -> bool: return message == str(exception) predicate = message_fnc elif match: prog = re.compile(match) - def match_fnc(exception): - return prog.match(str(exception)) + def match_fnc(exception: BaseException) -> bool: + return bool(prog.match(str(exception))) predicate = match_fnc else: - raise TypeError( - "{}() missing 1 required argument 'message' or 'match'".format( - self.__class__.__name__ - ) - ) + raise TypeError(f"{self.__class__.__name__}() missing 1 required argument 'message' or 'match'") - super(retry_if_exception_message, self).__init__(predicate) + super().__init__(predicate) class retry_if_not_exception_message(retry_if_exception_message): """Retries until an exception message equals or matches.""" - def __init__(self, *args, **kwargs): - super(retry_if_not_exception_message, self).__init__(*args, **kwargs) + def __init__( + self, + message: typing.Optional[str] = None, + match: typing.Optional[str] = None, + ) -> None: + super().__init__(message, match) # invert predicate if_predicate = self.predicate self.predicate = lambda *args_, **kwargs_: not if_predicate(*args_, **kwargs_) - def __call__(self, retry_state): + def __call__(self, retry_state: "RetryCallState") -> bool: + if retry_state.outcome is None: + raise RuntimeError("__call__() called before outcome was set") + if not retry_state.outcome.failed: return True - return self.predicate(retry_state.outcome.exception()) + + exception = retry_state.outcome.exception() + if exception is None: + raise RuntimeError("outcome failed but the exception is None") + return self.predicate(exception) class retry_any(retry_base): """Retries if any of the retries condition is valid.""" - def __init__(self, *retries): + def __init__(self, *retries: retry_base) -> None: self.retries = retries - def __call__(self, retry_state): + def __call__(self, retry_state: "RetryCallState") -> bool: return any(r(retry_state) for r in self.retries) class retry_all(retry_base): """Retries if all the retries condition are valid.""" - def __init__(self, *retries): + def __init__(self, *retries: retry_base) -> None: self.retries = retries - def __call__(self, retry_state): + def __call__(self, retry_state: "RetryCallState") -> bool: return all(r(retry_state) for r in self.retries) diff --git a/env/Lib/site-packages/pip/_vendor/tenacity/stop.py b/env/Lib/site-packages/pip/_vendor/tenacity/stop.py index 4db27f14431d04209e37fb0c7a86af906eb29c3c..bb23effdf865b007756451f61fcbd7635f15b5d5 100644 --- a/env/Lib/site-packages/pip/_vendor/tenacity/stop.py +++ b/env/Lib/site-packages/pip/_vendor/tenacity/stop.py @@ -1,5 +1,3 @@ -# -*- encoding: utf-8 -*- -# # Copyright 2016–2021 Julien Danjou # Copyright 2016 Joshua Harlow # Copyright 2013-2014 Ray Holder @@ -16,49 +14,57 @@ # See the License for the specific language governing permissions and # limitations under the License. 
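# A sketch of how the predicate classes from the retry.py diff above compose,
# assuming the vendored import path: retry_base defines __or__ and __and__,
# so retry_any and retry_all can be spelled with | and &. poll_status is a
# hypothetical callable, not part of this diff.

from pip._vendor.tenacity import (
    retry,
    retry_if_exception_message,
    retry_if_exception_type,
    retry_if_result,
    stop_after_attempt,
)

# Retry on ConnectionError, on an exception whose str() matches "HTTP 5xx",
# or whenever the wrapped call returned None.
should_retry = (
    retry_if_exception_type(ConnectionError)
    | retry_if_exception_message(match=r"HTTP 5\d\d")
    | retry_if_result(lambda value: value is None)
)


@retry(retry=should_retry, stop=stop_after_attempt(5))
def poll_status() -> "str | None":
    raise ConnectionError("HTTP 503")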
import abc +import typing + +from pip._vendor.tenacity import _utils + +if typing.TYPE_CHECKING: + import threading -from pip._vendor import six + from pip._vendor.tenacity import RetryCallState -@six.add_metaclass(abc.ABCMeta) -class stop_base(object): +class stop_base(abc.ABC): """Abstract base class for stop strategies.""" @abc.abstractmethod - def __call__(self, retry_state): + def __call__(self, retry_state: "RetryCallState") -> bool: pass - def __and__(self, other): + def __and__(self, other: "stop_base") -> "stop_all": return stop_all(self, other) - def __or__(self, other): + def __or__(self, other: "stop_base") -> "stop_any": return stop_any(self, other) +StopBaseT = typing.Union[stop_base, typing.Callable[["RetryCallState"], bool]] + + class stop_any(stop_base): """Stop if any of the stop condition is valid.""" - def __init__(self, *stops): + def __init__(self, *stops: stop_base) -> None: self.stops = stops - def __call__(self, retry_state): + def __call__(self, retry_state: "RetryCallState") -> bool: return any(x(retry_state) for x in self.stops) class stop_all(stop_base): """Stop if all the stop conditions are valid.""" - def __init__(self, *stops): + def __init__(self, *stops: stop_base) -> None: self.stops = stops - def __call__(self, retry_state): + def __call__(self, retry_state: "RetryCallState") -> bool: return all(x(retry_state) for x in self.stops) class _stop_never(stop_base): """Never stop.""" - def __call__(self, retry_state): + def __call__(self, retry_state: "RetryCallState") -> bool: return False @@ -68,28 +74,30 @@ stop_never = _stop_never() class stop_when_event_set(stop_base): """Stop when the given event is set.""" - def __init__(self, event): + def __init__(self, event: "threading.Event") -> None: self.event = event - def __call__(self, retry_state): + def __call__(self, retry_state: "RetryCallState") -> bool: return self.event.is_set() class stop_after_attempt(stop_base): """Stop when the previous attempt >= max_attempt.""" - def __init__(self, max_attempt_number): + def __init__(self, max_attempt_number: int) -> None: self.max_attempt_number = max_attempt_number - def __call__(self, retry_state): + def __call__(self, retry_state: "RetryCallState") -> bool: return retry_state.attempt_number >= self.max_attempt_number class stop_after_delay(stop_base): """Stop when the time from the first attempt >= limit.""" - def __init__(self, max_delay): - self.max_delay = max_delay + def __init__(self, max_delay: _utils.time_unit_type) -> None: + self.max_delay = _utils.to_seconds(max_delay) - def __call__(self, retry_state): + def __call__(self, retry_state: "RetryCallState") -> bool: + if retry_state.seconds_since_start is None: + raise RuntimeError("__call__() called but seconds_since_start is not set") return retry_state.seconds_since_start >= self.max_delay diff --git a/env/Lib/site-packages/pip/_vendor/tenacity/tornadoweb.py b/env/Lib/site-packages/pip/_vendor/tenacity/tornadoweb.py index dbf9f762578fc075eedb629d64d9d60f4cdea6a5..e19c30b18905a39466ab6b51403438605e706caf 100644 --- a/env/Lib/site-packages/pip/_vendor/tenacity/tornadoweb.py +++ b/env/Lib/site-packages/pip/_vendor/tenacity/tornadoweb.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Elisey Zanko # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,6 +13,7 @@ # limitations under the License. 
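# A sketch of the stop strategies from the stop.py diff above used in
# combination, assuming the vendored import path: stop_base.__or__ builds a
# stop_any, and stop_after_delay now accepts a timedelta as well as a plain
# number thanks to _utils.to_seconds. warm_up is a hypothetical callable,
# not part of this diff.

from datetime import timedelta

from pip._vendor.tenacity import retry, stop_after_attempt, stop_after_delay


# Give up after 5 attempts or 30 seconds since the first attempt,
# whichever comes first.
@retry(stop=stop_after_attempt(5) | stop_after_delay(timedelta(seconds=30)))
def warm_up() -> None:
    raise TimeoutError("still starting")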
import sys +import typing from pip._vendor.tenacity import BaseRetrying from pip._vendor.tenacity import DoAttempt @@ -22,15 +22,25 @@ from pip._vendor.tenacity import RetryCallState from tornado import gen +if typing.TYPE_CHECKING: + from tornado.concurrent import Future + +_RetValT = typing.TypeVar("_RetValT") + class TornadoRetrying(BaseRetrying): - def __init__(self, sleep=gen.sleep, **kwargs): - super(TornadoRetrying, self).__init__(**kwargs) + def __init__(self, sleep: "typing.Callable[[float], Future[None]]" = gen.sleep, **kwargs: typing.Any) -> None: + super().__init__(**kwargs) self.sleep = sleep - @gen.coroutine - def __call__(self, fn, *args, **kwargs): - self.begin(fn) + @gen.coroutine # type: ignore[misc] + def __call__( + self, + fn: "typing.Callable[..., typing.Union[typing.Generator[typing.Any, typing.Any, _RetValT], Future[_RetValT]]]", + *args: typing.Any, + **kwargs: typing.Any, + ) -> "typing.Generator[typing.Any, typing.Any, _RetValT]": + self.begin() retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs) while True: @@ -39,7 +49,7 @@ class TornadoRetrying(BaseRetrying): try: result = yield fn(*args, **kwargs) except BaseException: # noqa: B902 - retry_state.set_exception(sys.exc_info()) + retry_state.set_exception(sys.exc_info()) # type: ignore[arg-type] else: retry_state.set_result(result) elif isinstance(do, DoSleep): diff --git a/env/Lib/site-packages/pip/_vendor/tenacity/wait.py b/env/Lib/site-packages/pip/_vendor/tenacity/wait.py index 625b0e368ef07a527fc06e82ec5699ff0f2f78d6..f9349c028360d541c56962d6a09bd9c2a00e3a37 100644 --- a/env/Lib/site-packages/pip/_vendor/tenacity/wait.py +++ b/env/Lib/site-packages/pip/_vendor/tenacity/wait.py @@ -1,5 +1,3 @@ -# -*- encoding: utf-8 -*- -# # Copyright 2016–2021 Julien Danjou # Copyright 2016 Joshua Harlow # Copyright 2013-2014 Ray Holder @@ -18,67 +16,69 @@ import abc import random - -from pip._vendor import six +import typing from pip._vendor.tenacity import _utils +if typing.TYPE_CHECKING: + from pip._vendor.tenacity import RetryCallState + -@six.add_metaclass(abc.ABCMeta) -class wait_base(object): +class wait_base(abc.ABC): """Abstract base class for wait strategies.""" @abc.abstractmethod - def __call__(self, retry_state): + def __call__(self, retry_state: "RetryCallState") -> float: pass - def __add__(self, other): + def __add__(self, other: "wait_base") -> "wait_combine": return wait_combine(self, other) - def __radd__(self, other): + def __radd__(self, other: "wait_base") -> typing.Union["wait_combine", "wait_base"]: # make it possible to use multiple waits with the built-in sum function - if other == 0: + if other == 0: # type: ignore[comparison-overlap] return self return self.__add__(other) +WaitBaseT = typing.Union[wait_base, typing.Callable[["RetryCallState"], typing.Union[float, int]]] + + class wait_fixed(wait_base): """Wait strategy that waits a fixed amount of time between each retry.""" - def __init__(self, wait): - self.wait_fixed = wait + def __init__(self, wait: _utils.time_unit_type) -> None: + self.wait_fixed = _utils.to_seconds(wait) - def __call__(self, retry_state): + def __call__(self, retry_state: "RetryCallState") -> float: return self.wait_fixed class wait_none(wait_fixed): """Wait strategy that doesn't wait at all before retrying.""" - def __init__(self): - super(wait_none, self).__init__(0) + def __init__(self) -> None: + super().__init__(0) class wait_random(wait_base): """Wait strategy that waits a random amount of time between min/max.""" - def __init__(self, 
min=0, max=1): # noqa - self.wait_random_min = min - self.wait_random_max = max + def __init__(self, min: _utils.time_unit_type = 0, max: _utils.time_unit_type = 1) -> None: # noqa + self.wait_random_min = _utils.to_seconds(min) + self.wait_random_max = _utils.to_seconds(max) - def __call__(self, retry_state): - return self.wait_random_min + ( - random.random() * (self.wait_random_max - self.wait_random_min) - ) + def __call__(self, retry_state: "RetryCallState") -> float: + return self.wait_random_min + (random.random() * (self.wait_random_max - self.wait_random_min)) class wait_combine(wait_base): """Combine several waiting strategies.""" - def __init__(self, *strategies): + def __init__(self, *strategies: wait_base) -> None: self.wait_funcs = strategies - def __call__(self, retry_state): + def __call__(self, retry_state: "RetryCallState") -> float: return sum(x(retry_state=retry_state) for x in self.wait_funcs) @@ -98,10 +98,10 @@ class wait_chain(wait_base): thereafter.") """ - def __init__(self, *strategies): + def __init__(self, *strategies: wait_base) -> None: self.strategies = strategies - def __call__(self, retry_state): + def __call__(self, retry_state: "RetryCallState") -> float: wait_func_no = min(max(retry_state.attempt_number, 1), len(self.strategies)) wait_func = self.strategies[wait_func_no - 1] return wait_func(retry_state=retry_state) @@ -114,12 +114,17 @@ class wait_incrementing(wait_base): (and restricting the upper limit to some maximum value). """ - def __init__(self, start=0, increment=100, max=_utils.MAX_WAIT): # noqa - self.start = start - self.increment = increment - self.max = max - - def __call__(self, retry_state): + def __init__( + self, + start: _utils.time_unit_type = 0, + increment: _utils.time_unit_type = 100, + max: _utils.time_unit_type = _utils.MAX_WAIT, # noqa + ) -> None: + self.start = _utils.to_seconds(start) + self.increment = _utils.to_seconds(increment) + self.max = _utils.to_seconds(max) + + def __call__(self, retry_state: "RetryCallState") -> float: result = self.start + (self.increment * (retry_state.attempt_number - 1)) return max(0, min(result, self.max)) @@ -137,13 +142,19 @@ class wait_exponential(wait_base): wait_random_exponential for the latter case. """ - def __init__(self, multiplier=1, max=_utils.MAX_WAIT, exp_base=2, min=0): # noqa + def __init__( + self, + multiplier: typing.Union[int, float] = 1, + max: _utils.time_unit_type = _utils.MAX_WAIT, # noqa + exp_base: typing.Union[int, float] = 2, + min: _utils.time_unit_type = 0, # noqa + ) -> None: self.multiplier = multiplier - self.min = min - self.max = max + self.min = _utils.to_seconds(min) + self.max = _utils.to_seconds(max) self.exp_base = exp_base - def __call__(self, retry_state): + def __call__(self, retry_state: "RetryCallState") -> float: try: exp = self.exp_base ** (retry_state.attempt_number - 1) result = self.multiplier * exp @@ -178,6 +189,40 @@ class wait_random_exponential(wait_exponential): """ - def __call__(self, retry_state): - high = super(wait_random_exponential, self).__call__(retry_state=retry_state) + def __call__(self, retry_state: "RetryCallState") -> float: + high = super().__call__(retry_state=retry_state) return random.uniform(0, high) + + +class wait_exponential_jitter(wait_base): + """Wait strategy that applies exponential backoff and jitter. + + It allows for a customized initial wait, maximum wait and jitter. 
+ + This implements the strategy described here: + https://cloud.google.com/storage/docs/retry-strategy + + The wait time is min(initial * 2**n + random.uniform(0, jitter), maximum) + where n is the retry count. + """ + + def __init__( + self, + initial: float = 1, + max: float = _utils.MAX_WAIT, # noqa + exp_base: float = 2, + jitter: float = 1, + ) -> None: + self.initial = initial + self.max = max + self.exp_base = exp_base + self.jitter = jitter + + def __call__(self, retry_state: "RetryCallState") -> float: + jitter = random.uniform(0, self.jitter) + try: + exp = self.exp_base ** (retry_state.attempt_number - 1) + result = self.initial * exp + jitter + except OverflowError: + result = self.max + return max(0, min(result, self.max)) diff --git a/env/Lib/site-packages/pip/_vendor/toml/__init__.py b/env/Lib/site-packages/pip/_vendor/toml/__init__.py deleted file mode 100644 index 34a5eabb6ea5bc06db65823a30da036d682a985b..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/toml/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -"""Python module which parses and emits TOML. - -Released under the MIT license. -""" - -from pip._vendor.toml import encoder -from pip._vendor.toml import decoder - -__version__ = "0.10.2" -_spec_ = "0.5.0" - -load = decoder.load -loads = decoder.loads -TomlDecoder = decoder.TomlDecoder -TomlDecodeError = decoder.TomlDecodeError -TomlPreserveCommentDecoder = decoder.TomlPreserveCommentDecoder - -dump = encoder.dump -dumps = encoder.dumps -TomlEncoder = encoder.TomlEncoder -TomlArraySeparatorEncoder = encoder.TomlArraySeparatorEncoder -TomlPreserveInlineDictEncoder = encoder.TomlPreserveInlineDictEncoder -TomlNumpyEncoder = encoder.TomlNumpyEncoder -TomlPreserveCommentEncoder = encoder.TomlPreserveCommentEncoder -TomlPathlibEncoder = encoder.TomlPathlibEncoder diff --git a/env/Lib/site-packages/pip/_vendor/toml/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/toml/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index ca82ded3eb81ec38fe627461a835663545c2b4bd..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/toml/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/toml/__pycache__/decoder.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/toml/__pycache__/decoder.cpython-39.pyc deleted file mode 100644 index ee3886f5664d4469ce2cb19412609f523febcade..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/toml/__pycache__/decoder.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/toml/__pycache__/encoder.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/toml/__pycache__/encoder.cpython-39.pyc deleted file mode 100644 index 8b97b1bb72931a01834061a9be164ebc033076c5..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/toml/__pycache__/encoder.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/toml/__pycache__/ordered.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/toml/__pycache__/ordered.cpython-39.pyc deleted file mode 100644 index 90cfae58691e54ede44f5243c98e1955324a18e1..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/toml/__pycache__/ordered.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/toml/__pycache__/tz.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/toml/__pycache__/tz.cpython-39.pyc deleted file 
mode 100644 index 648184f27d9f9fa32c959539c8b12e66a5449dba..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/toml/__pycache__/tz.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/toml/decoder.py b/env/Lib/site-packages/pip/_vendor/toml/decoder.py deleted file mode 100644 index e071100de0f3ef75b2149857861c2f60fc4e89af..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/toml/decoder.py +++ /dev/null @@ -1,1057 +0,0 @@ -import datetime -import io -from os import linesep -import re -import sys - -from pip._vendor.toml.tz import TomlTz - -if sys.version_info < (3,): - _range = xrange # noqa: F821 -else: - unicode = str - _range = range - basestring = str - unichr = chr - - -def _detect_pathlib_path(p): - if (3, 4) <= sys.version_info: - import pathlib - if isinstance(p, pathlib.PurePath): - return True - return False - - -def _ispath(p): - if isinstance(p, (bytes, basestring)): - return True - return _detect_pathlib_path(p) - - -def _getpath(p): - if (3, 6) <= sys.version_info: - import os - return os.fspath(p) - if _detect_pathlib_path(p): - return str(p) - return p - - -try: - FNFError = FileNotFoundError -except NameError: - FNFError = IOError - - -TIME_RE = re.compile(r"([0-9]{2}):([0-9]{2}):([0-9]{2})(\.([0-9]{3,6}))?") - - -class TomlDecodeError(ValueError): - """Base toml Exception / Error.""" - - def __init__(self, msg, doc, pos): - lineno = doc.count('\n', 0, pos) + 1 - colno = pos - doc.rfind('\n', 0, pos) - emsg = '{} (line {} column {} char {})'.format(msg, lineno, colno, pos) - ValueError.__init__(self, emsg) - self.msg = msg - self.doc = doc - self.pos = pos - self.lineno = lineno - self.colno = colno - - -# Matches a TOML number, which allows underscores for readability -_number_with_underscores = re.compile('([0-9])(_([0-9]))*') - - -class CommentValue(object): - def __init__(self, val, comment, beginline, _dict): - self.val = val - separator = "\n" if beginline else " " - self.comment = separator + comment - self._dict = _dict - - def __getitem__(self, key): - return self.val[key] - - def __setitem__(self, key, value): - self.val[key] = value - - def dump(self, dump_value_func): - retstr = dump_value_func(self.val) - if isinstance(self.val, self._dict): - return self.comment + "\n" + unicode(retstr) - else: - return unicode(retstr) + self.comment - - -def _strictly_valid_num(n): - n = n.strip() - if not n: - return False - if n[0] == '_': - return False - if n[-1] == '_': - return False - if "_." 
in n or "._" in n: - return False - if len(n) == 1: - return True - if n[0] == '0' and n[1] not in ['.', 'o', 'b', 'x']: - return False - if n[0] == '+' or n[0] == '-': - n = n[1:] - if len(n) > 1 and n[0] == '0' and n[1] != '.': - return False - if '__' in n: - return False - return True - - -def load(f, _dict=dict, decoder=None): - """Parses named file or files as toml and returns a dictionary - - Args: - f: Path to the file to open, array of files to read into single dict - or a file descriptor - _dict: (optional) Specifies the class of the returned toml dictionary - decoder: The decoder to use - - Returns: - Parsed toml file represented as a dictionary - - Raises: - TypeError -- When f is invalid type - TomlDecodeError: Error while decoding toml - IOError / FileNotFoundError -- When an array with no valid (existing) - (Python 2 / Python 3) file paths is passed - """ - - if _ispath(f): - with io.open(_getpath(f), encoding='utf-8') as ffile: - return loads(ffile.read(), _dict, decoder) - elif isinstance(f, list): - from os import path as op - from warnings import warn - if not [path for path in f if op.exists(path)]: - error_msg = "Load expects a list to contain filenames only." - error_msg += linesep - error_msg += ("The list needs to contain the path of at least one " - "existing file.") - raise FNFError(error_msg) - if decoder is None: - decoder = TomlDecoder(_dict) - d = decoder.get_empty_table() - for l in f: # noqa: E741 - if op.exists(l): - d.update(load(l, _dict, decoder)) - else: - warn("Non-existent filename in list with at least one valid " - "filename") - return d - else: - try: - return loads(f.read(), _dict, decoder) - except AttributeError: - raise TypeError("You can only load a file descriptor, filename or " - "list") - - -_groupname_re = re.compile(r'^[A-Za-z0-9_-]+$') - - -def loads(s, _dict=dict, decoder=None): - """Parses string as toml - - Args: - s: String to be parsed - _dict: (optional) Specifies the class of the returned toml dictionary - - Returns: - Parsed toml file represented as a dictionary - - Raises: - TypeError: When a non-string is passed - TomlDecodeError: Error while decoding toml - """ - - implicitgroups = [] - if decoder is None: - decoder = TomlDecoder(_dict) - retval = decoder.get_empty_table() - currentlevel = retval - if not isinstance(s, basestring): - raise TypeError("Expecting something like a string") - - if not isinstance(s, unicode): - s = s.decode('utf8') - - original = s - sl = list(s) - openarr = 0 - openstring = False - openstrchar = "" - multilinestr = False - arrayoftables = False - beginline = True - keygroup = False - dottedkey = False - keyname = 0 - key = '' - prev_key = '' - line_no = 1 - - for i, item in enumerate(sl): - if item == '\r' and sl[i + 1] == '\n': - sl[i] = ' ' - continue - if keyname: - key += item - if item == '\n': - raise TomlDecodeError("Key name found without value." - " Reached end of line.", original, i) - if openstring: - if item == openstrchar: - oddbackslash = False - k = 1 - while i >= k and sl[i - k] == '\\': - oddbackslash = not oddbackslash - k += 1 - if not oddbackslash: - keyname = 2 - openstring = False - openstrchar = "" - continue - elif keyname == 1: - if item.isspace(): - keyname = 2 - continue - elif item == '.': - dottedkey = True - continue - elif item.isalnum() or item == '_' or item == '-': - continue - elif (dottedkey and sl[i - 1] == '.' 
and - (item == '"' or item == "'")): - openstring = True - openstrchar = item - continue - elif keyname == 2: - if item.isspace(): - if dottedkey: - nextitem = sl[i + 1] - if not nextitem.isspace() and nextitem != '.': - keyname = 1 - continue - if item == '.': - dottedkey = True - nextitem = sl[i + 1] - if not nextitem.isspace() and nextitem != '.': - keyname = 1 - continue - if item == '=': - keyname = 0 - prev_key = key[:-1].rstrip() - key = '' - dottedkey = False - else: - raise TomlDecodeError("Found invalid character in key name: '" + - item + "'. Try quoting the key name.", - original, i) - if item == "'" and openstrchar != '"': - k = 1 - try: - while sl[i - k] == "'": - k += 1 - if k == 3: - break - except IndexError: - pass - if k == 3: - multilinestr = not multilinestr - openstring = multilinestr - else: - openstring = not openstring - if openstring: - openstrchar = "'" - else: - openstrchar = "" - if item == '"' and openstrchar != "'": - oddbackslash = False - k = 1 - tripquote = False - try: - while sl[i - k] == '"': - k += 1 - if k == 3: - tripquote = True - break - if k == 1 or (k == 3 and tripquote): - while sl[i - k] == '\\': - oddbackslash = not oddbackslash - k += 1 - except IndexError: - pass - if not oddbackslash: - if tripquote: - multilinestr = not multilinestr - openstring = multilinestr - else: - openstring = not openstring - if openstring: - openstrchar = '"' - else: - openstrchar = "" - if item == '#' and (not openstring and not keygroup and - not arrayoftables): - j = i - comment = "" - try: - while sl[j] != '\n': - comment += s[j] - sl[j] = ' ' - j += 1 - except IndexError: - break - if not openarr: - decoder.preserve_comment(line_no, prev_key, comment, beginline) - if item == '[' and (not openstring and not keygroup and - not arrayoftables): - if beginline: - if len(sl) > i + 1 and sl[i + 1] == '[': - arrayoftables = True - else: - keygroup = True - else: - openarr += 1 - if item == ']' and not openstring: - if keygroup: - keygroup = False - elif arrayoftables: - if sl[i - 1] == ']': - arrayoftables = False - else: - openarr -= 1 - if item == '\n': - if openstring or multilinestr: - if not multilinestr: - raise TomlDecodeError("Unbalanced quotes", original, i) - if ((sl[i - 1] == "'" or sl[i - 1] == '"') and ( - sl[i - 2] == sl[i - 1])): - sl[i] = sl[i - 1] - if sl[i - 3] == sl[i - 1]: - sl[i - 3] = ' ' - elif openarr: - sl[i] = ' ' - else: - beginline = True - line_no += 1 - elif beginline and sl[i] != ' ' and sl[i] != '\t': - beginline = False - if not keygroup and not arrayoftables: - if sl[i] == '=': - raise TomlDecodeError("Found empty keyname. ", original, i) - keyname = 1 - key += item - if keyname: - raise TomlDecodeError("Key name found without value." - " Reached end of file.", original, len(s)) - if openstring: # reached EOF and have an unterminated string - raise TomlDecodeError("Unterminated string found." 
- " Reached end of file.", original, len(s)) - s = ''.join(sl) - s = s.split('\n') - multikey = None - multilinestr = "" - multibackslash = False - pos = 0 - for idx, line in enumerate(s): - if idx > 0: - pos += len(s[idx - 1]) + 1 - - decoder.embed_comments(idx, currentlevel) - - if not multilinestr or multibackslash or '\n' not in multilinestr: - line = line.strip() - if line == "" and (not multikey or multibackslash): - continue - if multikey: - if multibackslash: - multilinestr += line - else: - multilinestr += line - multibackslash = False - closed = False - if multilinestr[0] == '[': - closed = line[-1] == ']' - elif len(line) > 2: - closed = (line[-1] == multilinestr[0] and - line[-2] == multilinestr[0] and - line[-3] == multilinestr[0]) - if closed: - try: - value, vtype = decoder.load_value(multilinestr) - except ValueError as err: - raise TomlDecodeError(str(err), original, pos) - currentlevel[multikey] = value - multikey = None - multilinestr = "" - else: - k = len(multilinestr) - 1 - while k > -1 and multilinestr[k] == '\\': - multibackslash = not multibackslash - k -= 1 - if multibackslash: - multilinestr = multilinestr[:-1] - else: - multilinestr += "\n" - continue - if line[0] == '[': - arrayoftables = False - if len(line) == 1: - raise TomlDecodeError("Opening key group bracket on line by " - "itself.", original, pos) - if line[1] == '[': - arrayoftables = True - line = line[2:] - splitstr = ']]' - else: - line = line[1:] - splitstr = ']' - i = 1 - quotesplits = decoder._get_split_on_quotes(line) - quoted = False - for quotesplit in quotesplits: - if not quoted and splitstr in quotesplit: - break - i += quotesplit.count(splitstr) - quoted = not quoted - line = line.split(splitstr, i) - if len(line) < i + 1 or line[-1].strip() != "": - raise TomlDecodeError("Key group not on a line by itself.", - original, pos) - groups = splitstr.join(line[:-1]).split('.') - i = 0 - while i < len(groups): - groups[i] = groups[i].strip() - if len(groups[i]) > 0 and (groups[i][0] == '"' or - groups[i][0] == "'"): - groupstr = groups[i] - j = i + 1 - while ((not groupstr[0] == groupstr[-1]) or - len(groupstr) == 1): - j += 1 - if j > len(groups) + 2: - raise TomlDecodeError("Invalid group name '" + - groupstr + "' Something " + - "went wrong.", original, pos) - groupstr = '.'.join(groups[i:j]).strip() - groups[i] = groupstr[1:-1] - groups[i + 1:j] = [] - else: - if not _groupname_re.match(groups[i]): - raise TomlDecodeError("Invalid group name '" + - groups[i] + "'. Try quoting it.", - original, pos) - i += 1 - currentlevel = retval - for i in _range(len(groups)): - group = groups[i] - if group == "": - raise TomlDecodeError("Can't have a keygroup with an empty " - "name", original, pos) - try: - currentlevel[group] - if i == len(groups) - 1: - if group in implicitgroups: - implicitgroups.remove(group) - if arrayoftables: - raise TomlDecodeError("An implicitly defined " - "table can't be an array", - original, pos) - elif arrayoftables: - currentlevel[group].append(decoder.get_empty_table() - ) - else: - raise TomlDecodeError("What? " + group + - " already exists?" 
+ - str(currentlevel), - original, pos) - except TypeError: - currentlevel = currentlevel[-1] - if group not in currentlevel: - currentlevel[group] = decoder.get_empty_table() - if i == len(groups) - 1 and arrayoftables: - currentlevel[group] = [decoder.get_empty_table()] - except KeyError: - if i != len(groups) - 1: - implicitgroups.append(group) - currentlevel[group] = decoder.get_empty_table() - if i == len(groups) - 1 and arrayoftables: - currentlevel[group] = [decoder.get_empty_table()] - currentlevel = currentlevel[group] - if arrayoftables: - try: - currentlevel = currentlevel[-1] - except KeyError: - pass - elif line[0] == "{": - if line[-1] != "}": - raise TomlDecodeError("Line breaks are not allowed in inline" - "objects", original, pos) - try: - decoder.load_inline_object(line, currentlevel, multikey, - multibackslash) - except ValueError as err: - raise TomlDecodeError(str(err), original, pos) - elif "=" in line: - try: - ret = decoder.load_line(line, currentlevel, multikey, - multibackslash) - except ValueError as err: - raise TomlDecodeError(str(err), original, pos) - if ret is not None: - multikey, multilinestr, multibackslash = ret - return retval - - -def _load_date(val): - microsecond = 0 - tz = None - try: - if len(val) > 19: - if val[19] == '.': - if val[-1].upper() == 'Z': - subsecondval = val[20:-1] - tzval = "Z" - else: - subsecondvalandtz = val[20:] - if '+' in subsecondvalandtz: - splitpoint = subsecondvalandtz.index('+') - subsecondval = subsecondvalandtz[:splitpoint] - tzval = subsecondvalandtz[splitpoint:] - elif '-' in subsecondvalandtz: - splitpoint = subsecondvalandtz.index('-') - subsecondval = subsecondvalandtz[:splitpoint] - tzval = subsecondvalandtz[splitpoint:] - else: - tzval = None - subsecondval = subsecondvalandtz - if tzval is not None: - tz = TomlTz(tzval) - microsecond = int(int(subsecondval) * - (10 ** (6 - len(subsecondval)))) - else: - tz = TomlTz(val[19:]) - except ValueError: - tz = None - if "-" not in val[1:]: - return None - try: - if len(val) == 10: - d = datetime.date( - int(val[:4]), int(val[5:7]), - int(val[8:10])) - else: - d = datetime.datetime( - int(val[:4]), int(val[5:7]), - int(val[8:10]), int(val[11:13]), - int(val[14:16]), int(val[17:19]), microsecond, tz) - except ValueError: - return None - return d - - -def _load_unicode_escapes(v, hexbytes, prefix): - skip = False - i = len(v) - 1 - while i > -1 and v[i] == '\\': - skip = not skip - i -= 1 - for hx in hexbytes: - if skip: - skip = False - i = len(hx) - 1 - while i > -1 and hx[i] == '\\': - skip = not skip - i -= 1 - v += prefix - v += hx - continue - hxb = "" - i = 0 - hxblen = 4 - if prefix == "\\U": - hxblen = 8 - hxb = ''.join(hx[i:i + hxblen]).lower() - if hxb.strip('0123456789abcdef'): - raise ValueError("Invalid escape sequence: " + hxb) - if hxb[0] == "d" and hxb[1].strip('01234567'): - raise ValueError("Invalid escape sequence: " + hxb + - ". Only scalar unicode points are allowed.") - v += unichr(int(hxb, 16)) - v += unicode(hx[len(hxb):]) - return v - - -# Unescape TOML string values. 
- -# content after the \ -_escapes = ['0', 'b', 'f', 'n', 'r', 't', '"'] -# What it should be replaced by -_escapedchars = ['\0', '\b', '\f', '\n', '\r', '\t', '\"'] -# Used for substitution -_escape_to_escapedchars = dict(zip(_escapes, _escapedchars)) - - -def _unescape(v): - """Unescape characters in a TOML string.""" - i = 0 - backslash = False - while i < len(v): - if backslash: - backslash = False - if v[i] in _escapes: - v = v[:i - 1] + _escape_to_escapedchars[v[i]] + v[i + 1:] - elif v[i] == '\\': - v = v[:i - 1] + v[i:] - elif v[i] == 'u' or v[i] == 'U': - i += 1 - else: - raise ValueError("Reserved escape sequence used") - continue - elif v[i] == '\\': - backslash = True - i += 1 - return v - - -class InlineTableDict(object): - """Sentinel subclass of dict for inline tables.""" - - -class TomlDecoder(object): - - def __init__(self, _dict=dict): - self._dict = _dict - - def get_empty_table(self): - return self._dict() - - def get_empty_inline_table(self): - class DynamicInlineTableDict(self._dict, InlineTableDict): - """Concrete sentinel subclass for inline tables. - It is a subclass of _dict which is passed in dynamically at load - time - - It is also a subclass of InlineTableDict - """ - - return DynamicInlineTableDict() - - def load_inline_object(self, line, currentlevel, multikey=False, - multibackslash=False): - candidate_groups = line[1:-1].split(",") - groups = [] - if len(candidate_groups) == 1 and not candidate_groups[0].strip(): - candidate_groups.pop() - while len(candidate_groups) > 0: - candidate_group = candidate_groups.pop(0) - try: - _, value = candidate_group.split('=', 1) - except ValueError: - raise ValueError("Invalid inline table encountered") - value = value.strip() - if ((value[0] == value[-1] and value[0] in ('"', "'")) or ( - value[0] in '-0123456789' or - value in ('true', 'false') or - (value[0] == "[" and value[-1] == "]") or - (value[0] == '{' and value[-1] == '}'))): - groups.append(candidate_group) - elif len(candidate_groups) > 0: - candidate_groups[0] = (candidate_group + "," + - candidate_groups[0]) - else: - raise ValueError("Invalid inline table value encountered") - for group in groups: - status = self.load_line(group, currentlevel, multikey, - multibackslash) - if status is not None: - break - - def _get_split_on_quotes(self, line): - doublequotesplits = line.split('"') - quoted = False - quotesplits = [] - if len(doublequotesplits) > 1 and "'" in doublequotesplits[0]: - singlequotesplits = doublequotesplits[0].split("'") - doublequotesplits = doublequotesplits[1:] - while len(singlequotesplits) % 2 == 0 and len(doublequotesplits): - singlequotesplits[-1] += '"' + doublequotesplits[0] - doublequotesplits = doublequotesplits[1:] - if "'" in singlequotesplits[-1]: - singlequotesplits = (singlequotesplits[:-1] + - singlequotesplits[-1].split("'")) - quotesplits += singlequotesplits - for doublequotesplit in doublequotesplits: - if quoted: - quotesplits.append(doublequotesplit) - else: - quotesplits += doublequotesplit.split("'") - quoted = not quoted - return quotesplits - - def load_line(self, line, currentlevel, multikey, multibackslash): - i = 1 - quotesplits = self._get_split_on_quotes(line) - quoted = False - for quotesplit in quotesplits: - if not quoted and '=' in quotesplit: - break - i += quotesplit.count('=') - quoted = not quoted - pair = line.split('=', i) - strictly_valid = _strictly_valid_num(pair[-1]) - if _number_with_underscores.match(pair[-1]): - pair[-1] = pair[-1].replace('_', '') - while len(pair[-1]) and (pair[-1][0] != ' ' 
and pair[-1][0] != '\t' and - pair[-1][0] != "'" and pair[-1][0] != '"' and - pair[-1][0] != '[' and pair[-1][0] != '{' and - pair[-1].strip() != 'true' and - pair[-1].strip() != 'false'): - try: - float(pair[-1]) - break - except ValueError: - pass - if _load_date(pair[-1]) is not None: - break - if TIME_RE.match(pair[-1]): - break - i += 1 - prev_val = pair[-1] - pair = line.split('=', i) - if prev_val == pair[-1]: - raise ValueError("Invalid date or number") - if strictly_valid: - strictly_valid = _strictly_valid_num(pair[-1]) - pair = ['='.join(pair[:-1]).strip(), pair[-1].strip()] - if '.' in pair[0]: - if '"' in pair[0] or "'" in pair[0]: - quotesplits = self._get_split_on_quotes(pair[0]) - quoted = False - levels = [] - for quotesplit in quotesplits: - if quoted: - levels.append(quotesplit) - else: - levels += [level.strip() for level in - quotesplit.split('.')] - quoted = not quoted - else: - levels = pair[0].split('.') - while levels[-1] == "": - levels = levels[:-1] - for level in levels[:-1]: - if level == "": - continue - if level not in currentlevel: - currentlevel[level] = self.get_empty_table() - currentlevel = currentlevel[level] - pair[0] = levels[-1].strip() - elif (pair[0][0] == '"' or pair[0][0] == "'") and \ - (pair[0][-1] == pair[0][0]): - pair[0] = _unescape(pair[0][1:-1]) - k, koffset = self._load_line_multiline_str(pair[1]) - if k > -1: - while k > -1 and pair[1][k + koffset] == '\\': - multibackslash = not multibackslash - k -= 1 - if multibackslash: - multilinestr = pair[1][:-1] - else: - multilinestr = pair[1] + "\n" - multikey = pair[0] - else: - value, vtype = self.load_value(pair[1], strictly_valid) - try: - currentlevel[pair[0]] - raise ValueError("Duplicate keys!") - except TypeError: - raise ValueError("Duplicate keys!") - except KeyError: - if multikey: - return multikey, multilinestr, multibackslash - else: - currentlevel[pair[0]] = value - - def _load_line_multiline_str(self, p): - poffset = 0 - if len(p) < 3: - return -1, poffset - if p[0] == '[' and (p.strip()[-1] != ']' and - self._load_array_isstrarray(p)): - newp = p[1:].strip().split(',') - while len(newp) > 1 and newp[-1][0] != '"' and newp[-1][0] != "'": - newp = newp[:-2] + [newp[-2] + ',' + newp[-1]] - newp = newp[-1] - poffset = len(p) - len(newp) - p = newp - if p[0] != '"' and p[0] != "'": - return -1, poffset - if p[1] != p[0] or p[2] != p[0]: - return -1, poffset - if len(p) > 5 and p[-1] == p[0] and p[-2] == p[0] and p[-3] == p[0]: - return -1, poffset - return len(p) - 1, poffset - - def load_value(self, v, strictly_valid=True): - if not v: - raise ValueError("Empty value is invalid") - if v == 'true': - return (True, "bool") - elif v.lower() == 'true': - raise ValueError("Only all lowercase booleans allowed") - elif v == 'false': - return (False, "bool") - elif v.lower() == 'false': - raise ValueError("Only all lowercase booleans allowed") - elif v[0] == '"' or v[0] == "'": - quotechar = v[0] - testv = v[1:].split(quotechar) - triplequote = False - triplequotecount = 0 - if len(testv) > 1 and testv[0] == '' and testv[1] == '': - testv = testv[2:] - triplequote = True - closed = False - for tv in testv: - if tv == '': - if triplequote: - triplequotecount += 1 - else: - closed = True - else: - oddbackslash = False - try: - i = -1 - j = tv[i] - while j == '\\': - oddbackslash = not oddbackslash - i -= 1 - j = tv[i] - except IndexError: - pass - if not oddbackslash: - if closed: - raise ValueError("Found tokens after a closed " + - "string. 
Invalid TOML.") - else: - if not triplequote or triplequotecount > 1: - closed = True - else: - triplequotecount = 0 - if quotechar == '"': - escapeseqs = v.split('\\')[1:] - backslash = False - for i in escapeseqs: - if i == '': - backslash = not backslash - else: - if i[0] not in _escapes and (i[0] != 'u' and - i[0] != 'U' and - not backslash): - raise ValueError("Reserved escape sequence used") - if backslash: - backslash = False - for prefix in ["\\u", "\\U"]: - if prefix in v: - hexbytes = v.split(prefix) - v = _load_unicode_escapes(hexbytes[0], hexbytes[1:], - prefix) - v = _unescape(v) - if len(v) > 1 and v[1] == quotechar and (len(v) < 3 or - v[1] == v[2]): - v = v[2:-2] - return (v[1:-1], "str") - elif v[0] == '[': - return (self.load_array(v), "array") - elif v[0] == '{': - inline_object = self.get_empty_inline_table() - self.load_inline_object(v, inline_object) - return (inline_object, "inline_object") - elif TIME_RE.match(v): - h, m, s, _, ms = TIME_RE.match(v).groups() - time = datetime.time(int(h), int(m), int(s), int(ms) if ms else 0) - return (time, "time") - else: - parsed_date = _load_date(v) - if parsed_date is not None: - return (parsed_date, "date") - if not strictly_valid: - raise ValueError("Weirdness with leading zeroes or " - "underscores in your number.") - itype = "int" - neg = False - if v[0] == '-': - neg = True - v = v[1:] - elif v[0] == '+': - v = v[1:] - v = v.replace('_', '') - lowerv = v.lower() - if '.' in v or ('x' not in v and ('e' in v or 'E' in v)): - if '.' in v and v.split('.', 1)[1] == '': - raise ValueError("This float is missing digits after " - "the point") - if v[0] not in '0123456789': - raise ValueError("This float doesn't have a leading " - "digit") - v = float(v) - itype = "float" - elif len(lowerv) == 3 and (lowerv == 'inf' or lowerv == 'nan'): - v = float(v) - itype = "float" - if itype == "int": - v = int(v, 0) - if neg: - return (0 - v, itype) - return (v, itype) - - def bounded_string(self, s): - if len(s) == 0: - return True - if s[-1] != s[0]: - return False - i = -2 - backslash = False - while len(s) + i > 0: - if s[i] == "\\": - backslash = not backslash - i -= 1 - else: - break - return not backslash - - def _load_array_isstrarray(self, a): - a = a[1:-1].strip() - if a != '' and (a[0] == '"' or a[0] == "'"): - return True - return False - - def load_array(self, a): - atype = None - retval = [] - a = a.strip() - if '[' not in a[1:-1] or "" != a[1:-1].split('[')[0].strip(): - strarray = self._load_array_isstrarray(a) - if not a[1:-1].strip().startswith('{'): - a = a[1:-1].split(',') - else: - # a is an inline object, we must find the matching parenthesis - # to define groups - new_a = [] - start_group_index = 1 - end_group_index = 2 - open_bracket_count = 1 if a[start_group_index] == '{' else 0 - in_str = False - while end_group_index < len(a[1:]): - if a[end_group_index] == '"' or a[end_group_index] == "'": - if in_str: - backslash_index = end_group_index - 1 - while (backslash_index > -1 and - a[backslash_index] == '\\'): - in_str = not in_str - backslash_index -= 1 - in_str = not in_str - if not in_str and a[end_group_index] == '{': - open_bracket_count += 1 - if in_str or a[end_group_index] != '}': - end_group_index += 1 - continue - elif a[end_group_index] == '}' and open_bracket_count > 1: - open_bracket_count -= 1 - end_group_index += 1 - continue - - # Increase end_group_index by 1 to get the closing bracket - end_group_index += 1 - - new_a.append(a[start_group_index:end_group_index]) - - # The next start index is at least 
after the closing - # bracket, a closing bracket can be followed by a comma - # since we are in an array. - start_group_index = end_group_index + 1 - while (start_group_index < len(a[1:]) and - a[start_group_index] != '{'): - start_group_index += 1 - end_group_index = start_group_index + 1 - a = new_a - b = 0 - if strarray: - while b < len(a) - 1: - ab = a[b].strip() - while (not self.bounded_string(ab) or - (len(ab) > 2 and - ab[0] == ab[1] == ab[2] and - ab[-2] != ab[0] and - ab[-3] != ab[0])): - a[b] = a[b] + ',' + a[b + 1] - ab = a[b].strip() - if b < len(a) - 2: - a = a[:b + 1] + a[b + 2:] - else: - a = a[:b + 1] - b += 1 - else: - al = list(a[1:-1]) - a = [] - openarr = 0 - j = 0 - for i in _range(len(al)): - if al[i] == '[': - openarr += 1 - elif al[i] == ']': - openarr -= 1 - elif al[i] == ',' and not openarr: - a.append(''.join(al[j:i])) - j = i + 1 - a.append(''.join(al[j:])) - for i in _range(len(a)): - a[i] = a[i].strip() - if a[i] != '': - nval, ntype = self.load_value(a[i]) - if atype: - if ntype != atype: - raise ValueError("Not a homogeneous array") - else: - atype = ntype - retval.append(nval) - return retval - - def preserve_comment(self, line_no, key, comment, beginline): - pass - - def embed_comments(self, idx, currentlevel): - pass - - -class TomlPreserveCommentDecoder(TomlDecoder): - - def __init__(self, _dict=dict): - self.saved_comments = {} - super(TomlPreserveCommentDecoder, self).__init__(_dict) - - def preserve_comment(self, line_no, key, comment, beginline): - self.saved_comments[line_no] = (key, comment, beginline) - - def embed_comments(self, idx, currentlevel): - if idx not in self.saved_comments: - return - - key, comment, beginline = self.saved_comments[idx] - currentlevel[key] = CommentValue(currentlevel[key], comment, beginline, - self._dict) diff --git a/env/Lib/site-packages/pip/_vendor/toml/encoder.py b/env/Lib/site-packages/pip/_vendor/toml/encoder.py deleted file mode 100644 index 7fb94da98ac72fd344b20952c3073ac87da4fe27..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/toml/encoder.py +++ /dev/null @@ -1,304 +0,0 @@ -import datetime -import re -import sys -from decimal import Decimal - -from pip._vendor.toml.decoder import InlineTableDict - -if sys.version_info >= (3,): - unicode = str - - -def dump(o, f, encoder=None): - """Writes out dict as toml to a file - - Args: - o: Object to dump into toml - f: File descriptor where the toml should be stored - encoder: The ``TomlEncoder`` to use for constructing the output string - - Returns: - String containing the toml corresponding to dictionary - - Raises: - TypeError: When anything other than file descriptor is passed - """ - - if not f.write: - raise TypeError("You can only dump an object to a file descriptor") - d = dumps(o, encoder=encoder) - f.write(d) - return d - - -def dumps(o, encoder=None): - """Stringifies input dict as toml - - Args: - o: Object to dump into toml - encoder: The ``TomlEncoder`` to use for constructing the output string - - Returns: - String containing the toml corresponding to dict - - Examples: - ```python - >>> import toml - >>> output = { - ... 'a': "I'm a string", - ... 'b': ["I'm", "a", "list"], - ... 'c': 2400 - ... 
} - >>> toml.dumps(output) - 'a = "I\'m a string"\nb = [ "I\'m", "a", "list",]\nc = 2400\n' - ``` - """ - - retval = "" - if encoder is None: - encoder = TomlEncoder(o.__class__) - addtoretval, sections = encoder.dump_sections(o, "") - retval += addtoretval - outer_objs = [id(o)] - while sections: - section_ids = [id(section) for section in sections.values()] - for outer_obj in outer_objs: - if outer_obj in section_ids: - raise ValueError("Circular reference detected") - outer_objs += section_ids - newsections = encoder.get_empty_table() - for section in sections: - addtoretval, addtosections = encoder.dump_sections( - sections[section], section) - - if addtoretval or (not addtoretval and not addtosections): - if retval and retval[-2:] != "\n\n": - retval += "\n" - retval += "[" + section + "]\n" - if addtoretval: - retval += addtoretval - for s in addtosections: - newsections[section + "." + s] = addtosections[s] - sections = newsections - return retval - - -def _dump_str(v): - if sys.version_info < (3,) and hasattr(v, 'decode') and isinstance(v, str): - v = v.decode('utf-8') - v = "%r" % v - if v[0] == 'u': - v = v[1:] - singlequote = v.startswith("'") - if singlequote or v.startswith('"'): - v = v[1:-1] - if singlequote: - v = v.replace("\\'", "'") - v = v.replace('"', '\\"') - v = v.split("\\x") - while len(v) > 1: - i = -1 - if not v[0]: - v = v[1:] - v[0] = v[0].replace("\\\\", "\\") - # No, I don't know why != works and == breaks - joinx = v[0][i] != "\\" - while v[0][:i] and v[0][i] == "\\": - joinx = not joinx - i -= 1 - if joinx: - joiner = "x" - else: - joiner = "u00" - v = [v[0] + joiner + v[1]] + v[2:] - return unicode('"' + v[0] + '"') - - -def _dump_float(v): - return "{}".format(v).replace("e+0", "e+").replace("e-0", "e-") - - -def _dump_time(v): - utcoffset = v.utcoffset() - if utcoffset is None: - return v.isoformat() - # The TOML norm specifies that it's local time thus we drop the offset - return v.isoformat()[:-6] - - -class TomlEncoder(object): - - def __init__(self, _dict=dict, preserve=False): - self._dict = _dict - self.preserve = preserve - self.dump_funcs = { - str: _dump_str, - unicode: _dump_str, - list: self.dump_list, - bool: lambda v: unicode(v).lower(), - int: lambda v: v, - float: _dump_float, - Decimal: _dump_float, - datetime.datetime: lambda v: v.isoformat().replace('+00:00', 'Z'), - datetime.time: _dump_time, - datetime.date: lambda v: v.isoformat() - } - - def get_empty_table(self): - return self._dict() - - def dump_list(self, v): - retval = "[" - for u in v: - retval += " " + unicode(self.dump_value(u)) + "," - retval += "]" - return retval - - def dump_inline_table(self, section): - """Preserve inline table in its compact syntax instead of expanding - into subsection. - - https://github.com/toml-lang/toml#user-content-inline-table - """ - retval = "" - if isinstance(section, dict): - val_list = [] - for k, v in section.items(): - val = self.dump_inline_table(v) - val_list.append(k + " = " + val) - retval += "{ " + ", ".join(val_list) + " }\n" - return retval - else: - return unicode(self.dump_value(section)) - - def dump_value(self, v): - # Lookup function corresponding to v's type - dump_fn = self.dump_funcs.get(type(v)) - if dump_fn is None and hasattr(v, '__iter__'): - dump_fn = self.dump_funcs[list] - # Evaluate function (if it exists) else return v - return dump_fn(v) if dump_fn is not None else self.dump_funcs[str](v) - - def dump_sections(self, o, sup): - retstr = "" - if sup != "" and sup[-1] != ".": - sup += '.' 
- retdict = self._dict() - arraystr = "" - for section in o: - section = unicode(section) - qsection = section - if not re.match(r'^[A-Za-z0-9_-]+$', section): - qsection = _dump_str(section) - if not isinstance(o[section], dict): - arrayoftables = False - if isinstance(o[section], list): - for a in o[section]: - if isinstance(a, dict): - arrayoftables = True - if arrayoftables: - for a in o[section]: - arraytabstr = "\n" - arraystr += "[[" + sup + qsection + "]]\n" - s, d = self.dump_sections(a, sup + qsection) - if s: - if s[0] == "[": - arraytabstr += s - else: - arraystr += s - while d: - newd = self._dict() - for dsec in d: - s1, d1 = self.dump_sections(d[dsec], sup + - qsection + "." + - dsec) - if s1: - arraytabstr += ("[" + sup + qsection + - "." + dsec + "]\n") - arraytabstr += s1 - for s1 in d1: - newd[dsec + "." + s1] = d1[s1] - d = newd - arraystr += arraytabstr - else: - if o[section] is not None: - retstr += (qsection + " = " + - unicode(self.dump_value(o[section])) + '\n') - elif self.preserve and isinstance(o[section], InlineTableDict): - retstr += (qsection + " = " + - self.dump_inline_table(o[section])) - else: - retdict[qsection] = o[section] - retstr += arraystr - return (retstr, retdict) - - -class TomlPreserveInlineDictEncoder(TomlEncoder): - - def __init__(self, _dict=dict): - super(TomlPreserveInlineDictEncoder, self).__init__(_dict, True) - - -class TomlArraySeparatorEncoder(TomlEncoder): - - def __init__(self, _dict=dict, preserve=False, separator=","): - super(TomlArraySeparatorEncoder, self).__init__(_dict, preserve) - if separator.strip() == "": - separator = "," + separator - elif separator.strip(' \t\n\r,'): - raise ValueError("Invalid separator for arrays") - self.separator = separator - - def dump_list(self, v): - t = [] - retval = "[" - for u in v: - t.append(self.dump_value(u)) - while t != []: - s = [] - for u in t: - if isinstance(u, list): - for r in u: - s.append(r) - else: - retval += " " + unicode(u) + self.separator - t = s - retval += "]" - return retval - - -class TomlNumpyEncoder(TomlEncoder): - - def __init__(self, _dict=dict, preserve=False): - import numpy as np - super(TomlNumpyEncoder, self).__init__(_dict, preserve) - self.dump_funcs[np.float16] = _dump_float - self.dump_funcs[np.float32] = _dump_float - self.dump_funcs[np.float64] = _dump_float - self.dump_funcs[np.int16] = self._dump_int - self.dump_funcs[np.int32] = self._dump_int - self.dump_funcs[np.int64] = self._dump_int - - def _dump_int(self, v): - return "{}".format(int(v)) - - -class TomlPreserveCommentEncoder(TomlEncoder): - - def __init__(self, _dict=dict, preserve=False): - from pip._vendor.toml.decoder import CommentValue - super(TomlPreserveCommentEncoder, self).__init__(_dict, preserve) - self.dump_funcs[CommentValue] = lambda v: v.dump(self.dump_value) - - -class TomlPathlibEncoder(TomlEncoder): - - def _dump_pathlib_path(self, v): - return _dump_str(str(v)) - - def dump_value(self, v): - if (3, 4) <= sys.version_info: - import pathlib - if isinstance(v, pathlib.PurePath): - v = str(v) - return super(TomlPathlibEncoder, self).dump_value(v) diff --git a/env/Lib/site-packages/pip/_vendor/toml/ordered.py b/env/Lib/site-packages/pip/_vendor/toml/ordered.py deleted file mode 100644 index 6052016e8e6536a8cf58fbbc72b9042f3e070fce..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/toml/ordered.py +++ /dev/null @@ -1,15 +0,0 @@ -from collections import OrderedDict -from pip._vendor.toml import TomlEncoder -from pip._vendor.toml import TomlDecoder - - 
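A usage sketch for the two ordered variants defined just below; this assumes the upstream `toml` package, where they live in `toml.ordered`:

import toml
from toml.ordered import TomlOrderedDecoder, TomlOrderedEncoder

text = 'b = 2\na = 1\n'
data = toml.loads(text, decoder=TomlOrderedDecoder())  # keys keep file order
assert list(data) == ['b', 'a']
assert toml.dumps(data, encoder=TomlOrderedEncoder()) == text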
-class TomlOrderedDecoder(TomlDecoder): - - def __init__(self): - super(self.__class__, self).__init__(_dict=OrderedDict) - - -class TomlOrderedEncoder(TomlEncoder): - - def __init__(self): - super(self.__class__, self).__init__(_dict=OrderedDict) diff --git a/env/Lib/site-packages/pip/_vendor/toml/tz.py b/env/Lib/site-packages/pip/_vendor/toml/tz.py deleted file mode 100644 index bf20593a2647cb2cf0a858fb19734874867352ef..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/toml/tz.py +++ /dev/null @@ -1,24 +0,0 @@ -from datetime import tzinfo, timedelta - - -class TomlTz(tzinfo): - def __init__(self, toml_offset): - if toml_offset == "Z": - self._raw_offset = "+00:00" - else: - self._raw_offset = toml_offset - self._sign = -1 if self._raw_offset[0] == '-' else 1 - self._hours = int(self._raw_offset[1:3]) - self._minutes = int(self._raw_offset[4:6]) - - def __deepcopy__(self, memo): - return self.__class__(self._raw_offset) - - def tzname(self, dt): - return "UTC" + self._raw_offset - - def utcoffset(self, dt): - return self._sign * timedelta(hours=self._hours, minutes=self._minutes) - - def dst(self, dt): - return timedelta(0) diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/__init__.py b/env/Lib/site-packages/pip/_vendor/urllib3/__init__.py index fe86b59d782bdb09d70aa44f80370be95a667c83..c6fa38212fb559a9b51fe36b72892839efae63f5 100644 --- a/env/Lib/site-packages/pip/_vendor/urllib3/__init__.py +++ b/env/Lib/site-packages/pip/_vendor/urllib3/__init__.py @@ -19,6 +19,23 @@ from .util.retry import Retry from .util.timeout import Timeout from .util.url import get_host +# === NOTE TO REPACKAGERS AND VENDORS === +# Please delete this block, this logic is only +# for urllib3 being distributed via PyPI. +# See: https://github.com/urllib3/urllib3/issues/2680 +try: + import urllib3_secure_extra # type: ignore # noqa: F401 +except ImportError: + pass +else: + warnings.warn( + "'urllib3[secure]' extra is deprecated and will be removed " + "in a future release of urllib3 2.x. 
Read more in this issue: " + "https://github.com/urllib3/urllib3/issues/2680", + category=DeprecationWarning, + stacklevel=2, + ) + __author__ = "Andrey Petrov (andrey.petrov@shazow.net)" __license__ = "MIT" __version__ = __version__ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index b0590e20ffb854e1fd19aaaddc309c7fee8edf69..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/__pycache__/_collections.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/__pycache__/_collections.cpython-39.pyc deleted file mode 100644 index 0d9be279d8b2c7d61f720aa03326b027a08b7f60..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/__pycache__/_collections.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/__pycache__/_version.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/__pycache__/_version.cpython-39.pyc deleted file mode 100644 index c0bd79342a7aeb9d625633d06149e15b205ed032..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/__pycache__/_version.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/__pycache__/connection.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/__pycache__/connection.cpython-39.pyc deleted file mode 100644 index fad7489d93a83a43191f25236d7f559d3a244f45..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/__pycache__/connection.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/__pycache__/connectionpool.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/__pycache__/connectionpool.cpython-39.pyc deleted file mode 100644 index c25e2fb87d2531988733a761435734aac771227d..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/__pycache__/connectionpool.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/__pycache__/exceptions.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/__pycache__/exceptions.cpython-39.pyc deleted file mode 100644 index b9f098e35685713970d82289a63bb3930bf72b52..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/__pycache__/exceptions.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/__pycache__/fields.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/__pycache__/fields.cpython-39.pyc deleted file mode 100644 index 56130a5219588566ba17d89f0b54fd2ab1a3bd1e..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/__pycache__/fields.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/__pycache__/filepost.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/__pycache__/filepost.cpython-39.pyc deleted file mode 100644 index 4cab2f7c226b170283d267ba4b6af10d53616dfe..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/__pycache__/filepost.cpython-39.pyc and /dev/null differ diff --git 
a/env/Lib/site-packages/pip/_vendor/urllib3/__pycache__/poolmanager.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/__pycache__/poolmanager.cpython-39.pyc deleted file mode 100644 index 76b6ecb0865fb55ce8ac8c0e6377b267c8537887..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/__pycache__/poolmanager.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/__pycache__/request.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/__pycache__/request.cpython-39.pyc deleted file mode 100644 index d45e5d369b6050a8e0bb4032ab07b627ea6a015b..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/__pycache__/request.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/__pycache__/response.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/__pycache__/response.cpython-39.pyc deleted file mode 100644 index 1d8c0efa8688b1d8bfd74c549b9ba8e5c0061757..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/__pycache__/response.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/_version.py b/env/Lib/site-packages/pip/_vendor/urllib3/_version.py index 97c983300b0c3128fccc7075936b3d6c2dbfeee1..d69ca3145704364d2dcb49150e1ad0b4dd761425 100644 --- a/env/Lib/site-packages/pip/_vendor/urllib3/_version.py +++ b/env/Lib/site-packages/pip/_vendor/urllib3/_version.py @@ -1,2 +1,2 @@ # This file is protected via CODEOWNERS -__version__ = "1.26.4" +__version__ = "1.26.16" diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/connection.py b/env/Lib/site-packages/pip/_vendor/urllib3/connection.py index 45580b7e1ea6835a328c0cb41fa41053873b58d5..54b96b19154ccaa138af6bc0a4ac2b8f763017ce 100644 --- a/env/Lib/site-packages/pip/_vendor/urllib3/connection.py +++ b/env/Lib/site-packages/pip/_vendor/urllib3/connection.py @@ -51,15 +51,16 @@ from .exceptions import ( SubjectAltNameWarning, SystemTimeWarning, ) -from .packages.ssl_match_hostname import CertificateError, match_hostname from .util import SKIP_HEADER, SKIPPABLE_HEADERS, connection from .util.ssl_ import ( assert_fingerprint, create_urllib3_context, + is_ipaddress, resolve_cert_reqs, resolve_ssl_version, ssl_wrap_socket, ) +from .util.ssl_match_hostname import CertificateError, match_hostname log = logging.getLogger(__name__) @@ -67,7 +68,7 @@ port_by_scheme = {"http": 80, "https": 443} # When it comes time to update this value as a part of regular maintenance # (ie test_recent_date is failing) update it to ~6 months before the current date. -RECENT_DATE = datetime.date(2020, 7, 1) +RECENT_DATE = datetime.date(2022, 1, 1) _CONTAINS_CONTROL_CHAR_RE = re.compile(r"[^-!#$%&'*+.^_`|~0-9a-zA-Z]") @@ -107,6 +108,10 @@ class HTTPConnection(_HTTPConnection, object): #: Whether this connection verifies the host's certificate. is_verified = False + #: Whether this proxy connection (if used) verifies the proxy host's + #: certificate. + proxy_is_verified = None + def __init__(self, *args, **kw): if not six.PY2: kw.pop("strict", None) @@ -201,7 +206,7 @@ class HTTPConnection(_HTTPConnection, object): self._prepare_conn(conn) def putrequest(self, method, url, *args, **kwargs): - """""" + """ """ # Empty docstring because the indentation of CPython's implementation # is broken but we don't want this method in our documentation. 
match = _CONTAINS_CONTROL_CHAR_RE.search(method) @@ -214,7 +219,7 @@ class HTTPConnection(_HTTPConnection, object): return _HTTPConnection.putrequest(self, method, url, *args, **kwargs) def putheader(self, header, *values): - """""" + """ """ if not any(isinstance(v, str) and v == SKIP_HEADER for v in values): _HTTPConnection.putheader(self, header, *values) elif six.ensure_str(header.lower()) not in SKIPPABLE_HEADERS: @@ -224,6 +229,11 @@ class HTTPConnection(_HTTPConnection, object): ) def request(self, method, url, body=None, headers=None): + # Update the inner socket's timeout value to send the request. + # This only triggers if the connection is re-used. + if getattr(self, "sock", None) is not None: + self.sock.settimeout(self.timeout) + if headers is None: headers = {} else: @@ -249,7 +259,7 @@ class HTTPConnection(_HTTPConnection, object): self.putheader("User-Agent", _get_default_user_agent()) for header, value in headers.items(): self.putheader(header, value) - if "transfer-encoding" not in headers: + if "transfer-encoding" not in header_keys: self.putheader("Transfer-Encoding", "chunked") self.endheaders() @@ -350,17 +360,15 @@ class HTTPSConnection(HTTPConnection): def connect(self): # Add certificate verification - conn = self._new_conn() + self.sock = conn = self._new_conn() hostname = self.host tls_in_tls = False if self._is_using_tunnel(): if self.tls_in_tls_required: - conn = self._connect_tls_proxy(hostname, conn) + self.sock = conn = self._connect_tls_proxy(hostname, conn) tls_in_tls = True - self.sock = conn - # Calls self._set_hostport(), so self.host is # self._tunnel_host below. self._tunnel() @@ -490,14 +498,10 @@ class HTTPSConnection(HTTPConnection): self.ca_cert_dir, self.ca_cert_data, ) - # By default urllib3's SSLContext disables `check_hostname` and uses - # a custom check. For proxies we're good with relying on the default - # verification. - ssl_context.check_hostname = True # If no cert was provided, use only the default options for server # certificate validation - return ssl_wrap_socket( + socket = ssl_wrap_socket( sock=conn, ca_certs=self.ca_certs, ca_cert_dir=self.ca_cert_dir, @@ -506,8 +510,37 @@ class HTTPSConnection(HTTPConnection): ssl_context=ssl_context, ) + if ssl_context.verify_mode != ssl.CERT_NONE and not getattr( + ssl_context, "check_hostname", False + ): + # While urllib3 attempts to always turn off hostname matching from + # the TLS library, this cannot always be done. So we check whether + # the TLS Library still thinks it's matching hostnames. + cert = socket.getpeercert() + if not cert.get("subjectAltName", ()): + warnings.warn( + ( + "Certificate for {0} has no `subjectAltName`, falling back to check for a " + "`commonName` for now. This feature is being removed by major browsers and " + "deprecated by RFC 2818. (See https://github.com/urllib3/urllib3/issues/497 " + "for details.)".format(hostname) + ), + SubjectAltNameWarning, + ) + _match_hostname(cert, hostname) + + self.proxy_is_verified = ssl_context.verify_mode == ssl.CERT_REQUIRED + return socket + def _match_hostname(cert, asserted_hostname): + # Our upstream implementation of ssl.match_hostname() + # only applies this normalization to IP addresses so it doesn't + # match DNS SANs so we do the same thing! 
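# For example, the strip("u[]") below turns a bracketed IPv6 authority such
# as "[::1]" (or u"[::1]" under a Python 2 repr) into "::1", which
# is_ipaddress() accepts, so the bare address is matched against the
# certificate's IP subjectAltName entries; anything that does not parse as
# an IP keeps the original hostname and is matched as a DNS name.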
+ stripped_hostname = asserted_hostname.strip("u[]") + if is_ipaddress(stripped_hostname): + asserted_hostname = stripped_hostname + try: match_hostname(cert, asserted_hostname) except CertificateError as e: diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/connectionpool.py b/env/Lib/site-packages/pip/_vendor/urllib3/connectionpool.py index 4708c5bfc7862d82be92321f519f5ad0c7814d6d..96844d933745d81d1e29e33c75f2ed24e518c999 100644 --- a/env/Lib/site-packages/pip/_vendor/urllib3/connectionpool.py +++ b/env/Lib/site-packages/pip/_vendor/urllib3/connectionpool.py @@ -2,6 +2,7 @@ from __future__ import absolute_import import errno import logging +import re import socket import sys import warnings @@ -35,7 +36,6 @@ from .exceptions import ( ) from .packages import six from .packages.six.moves import queue -from .packages.ssl_match_hostname import CertificateError from .request import RequestMethods from .response import HTTPResponse from .util.connection import is_connection_dropped @@ -44,11 +44,19 @@ from .util.queue import LifoQueue from .util.request import set_file_position from .util.response import assert_header_parsing from .util.retry import Retry +from .util.ssl_match_hostname import CertificateError from .util.timeout import Timeout from .util.url import Url, _encode_target from .util.url import _normalize_host as normalize_host from .util.url import get_host, parse_url +try: # Platform-specific: Python 3 + import weakref + + weakref_finalize = weakref.finalize +except AttributeError: # Platform-specific: Python 2 + from .packages.backports.weakref_finalize import weakref_finalize + xrange = six.moves.xrange log = logging.getLogger(__name__) @@ -219,6 +227,16 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods): self.conn_kw["proxy"] = self.proxy self.conn_kw["proxy_config"] = self.proxy_config + # Do not pass 'self' as callback to 'finalize'. + # Then the 'finalize' would keep an endless living (leak) to self. + # By just passing a reference to the pool allows the garbage collector + # to free self if nobody else has a reference to it. + pool = self.pool + + # Close all the HTTPConnections in the pool before the + # HTTPConnectionPool object is garbage collected. + weakref_finalize(self, _close_pool_connections, pool) + def _new_conn(self): """ Return a fresh :class:`HTTPConnection`. @@ -301,8 +319,11 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods): pass except queue.Full: # This should never happen if self.block == True - log.warning("Connection pool is full, discarding connection: %s", self.host) - + log.warning( + "Connection pool is full, discarding connection: %s. Connection pool size: %s", + self.host, + self.pool.qsize(), + ) # Connection never got put back into the pool, close it. if conn: conn.close() @@ -318,7 +339,7 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods): pass def _get_timeout(self, timeout): - """ Helper that always returns a :class:`urllib3.util.Timeout` """ + """Helper that always returns a :class:`urllib3.util.Timeout`""" if timeout is _Default: return self.timeout.clone() @@ -375,7 +396,7 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods): timeout_obj = self._get_timeout(timeout) timeout_obj.start_connect() - conn.timeout = timeout_obj.connect_timeout + conn.timeout = Timeout.resolve_default_timeout(timeout_obj.connect_timeout) # Trigger any extra validation we need to do. 
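# (Timeout.resolve_default_timeout(), used just above, resolves the
# _Default sentinel to socket.getdefaulttimeout() so the raw socket never
# receives the sentinel object itself.)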
try: @@ -485,14 +506,8 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods): # Disable access to the pool old_pool, self.pool = self.pool, None - try: - while True: - conn = old_pool.get(block=False) - if conn: - conn.close() - - except queue.Empty: - pass # Done. + # Close all the HTTPConnections in the pool. + _close_pool_connections(old_pool) def is_same_host(self, url): """ @@ -745,7 +760,35 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods): # Discard the connection for these exceptions. It will be # replaced during the next _get_conn() call. clean_exit = False - if isinstance(e, (BaseSSLError, CertificateError)): + + def _is_ssl_error_message_from_http_proxy(ssl_error): + # We're trying to detect the message 'WRONG_VERSION_NUMBER' but + # SSLErrors are kinda all over the place when it comes to the message, + # so we try to cover our bases here! + message = " ".join(re.split("[^a-z]", str(ssl_error).lower())) + return ( + "wrong version number" in message or "unknown protocol" in message + ) + + # Try to detect a common user error with proxies which is to + # set an HTTP proxy to be HTTPS when it should be 'http://' + # (ie {'http': 'http://proxy', 'https': 'https://proxy'}) + # Instead we add a nice error message and point to a URL. + if ( + isinstance(e, BaseSSLError) + and self.proxy + and _is_ssl_error_message_from_http_proxy(e) + and conn.proxy + and conn.proxy.scheme == "https" + ): + e = ProxyError( + "Your proxy appears to only use HTTP and not HTTPS, " + "try changing your proxy URL to be HTTP. See: " + "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html" + "#https-proxy-error-http-proxy", + SSLError(e), + ) + elif isinstance(e, (BaseSSLError, CertificateError)): e = SSLError(e) elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy: e = ProxyError("Cannot connect to proxy.", e) @@ -830,7 +873,7 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods): ) # Check if we should retry the HTTP response. - has_retry_after = bool(response.getheader("Retry-After")) + has_retry_after = bool(response.headers.get("Retry-After")) if retries.is_retry(method, response.status, has_retry_after): try: retries = retries.increment(method, url, response=response, _pool=self) @@ -1014,12 +1057,23 @@ class HTTPSConnectionPool(HTTPConnectionPool): ( "Unverified HTTPS request is being made to host '%s'. " "Adding certificate verification is strongly advised. See: " - "https://urllib3.readthedocs.io/en/latest/advanced-usage.html" + "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html" "#ssl-warnings" % conn.host ), InsecureRequestWarning, ) + if getattr(conn, "proxy_is_verified", None) is False: + warnings.warn( + ( + "Unverified HTTPS connection done to an HTTPS proxy. " + "Adding certificate verification is strongly advised. See: " + "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html" + "#ssl-warnings" + ), + InsecureRequestWarning, + ) + def connection_from_url(url, **kw): """ @@ -1065,3 +1119,14 @@ def _normalize_host(host, scheme): if host.startswith("[") and host.endswith("]"): host = host[1:-1] return host + + +def _close_pool_connections(pool): + """Drains a queue of connections and closes each one.""" + try: + while True: + conn = pool.get(block=False) + if conn: + conn.close() + except queue.Empty: + pass # Done. 
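The weakref_finalize registration added above follows the rule its own comment spells out: the callback must not close over self, or the finalizer itself keeps the pool object alive. A minimal self-contained sketch of the same pattern (hypothetical Pool class, stdlib only):

import weakref
from queue import Empty, LifoQueue

def _drain(q):
    """Close every connection still sitting in the queue."""
    try:
        while True:
            conn = q.get(block=False)
            if conn:
                conn.close()
    except Empty:
        pass

class Pool(object):
    def __init__(self):
        self.pool = LifoQueue()
        # Register the queue, not self: a reference to self (or a bound
        # method) in the callback would keep the pool alive for the life
        # of the process instead of letting it be garbage collected.
        weakref.finalize(self, _drain, self.pool)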
diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 40e6d611f523e4472813c5a5e4ea2ff113b25b3d..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/_appengine_environ.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/_appengine_environ.cpython-39.pyc deleted file mode 100644 index 3e32e8d745d2691a7f5293dae56899ced8c6f0d7..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/_appengine_environ.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/appengine.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/appengine.cpython-39.pyc deleted file mode 100644 index baa44069aba7776faed88519d3ff6eae68581e63..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/appengine.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/ntlmpool.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/ntlmpool.cpython-39.pyc deleted file mode 100644 index 4470deba90c92bfb21b4b8dff348fd5d12b54a10..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/ntlmpool.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/pyopenssl.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/pyopenssl.cpython-39.pyc deleted file mode 100644 index 86e6bed6938f93244b2d9763c4905e02d23f1776..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/pyopenssl.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/securetransport.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/securetransport.cpython-39.pyc deleted file mode 100644 index 3ea8ac016dae5f4200b945e5593622933a9881df..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/securetransport.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/socks.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/socks.cpython-39.pyc deleted file mode 100644 index 8bbdbfc62c257f22db850c24fb51b81e5cc42215..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/contrib/__pycache__/socks.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 7a98112d02adb973bace1e0d5af4cdc2621eae0b..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git 
a/env/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/bindings.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/bindings.cpython-39.pyc deleted file mode 100644 index 5530fef9006d1747934f5a357781467c698c22f7..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/bindings.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/low_level.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/low_level.cpython-39.pyc deleted file mode 100644 index f7b77d6125dde46e61d6fd9ac1b5d34a06c6f2ee..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/low_level.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.py b/env/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.py index 42526be7f55fa9640a4f7ccc245f7299f9483e6b..264d564dbda676b52f446c0d25433a15939a78a3 100644 --- a/env/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.py +++ b/env/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.py @@ -48,7 +48,7 @@ from ctypes import ( ) from ctypes.util import find_library -from pip._vendor.urllib3.packages.six import raise_from +from ...packages.six import raise_from if platform.system() != "Darwin": raise ImportError("Only macOS is supported") diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/low_level.py b/env/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/low_level.py index ed8120190c06fadbc1a14b328c2ca0196046b220..fa0b245d279e96724d5610f93bc3b3c8c22ca032 100644 --- a/env/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/low_level.py +++ b/env/Lib/site-packages/pip/_vendor/urllib3/contrib/_securetransport/low_level.py @@ -188,6 +188,7 @@ def _cert_array_from_pem(pem_bundle): # We only want to do that if an error occurs: otherwise, the caller # should free. CoreFoundation.CFRelease(cert_array) + raise return cert_array diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/contrib/appengine.py b/env/Lib/site-packages/pip/_vendor/urllib3/contrib/appengine.py index b9d2a6907c4ddb39a9ac6857d2ca6884fe7c7cf7..1717ee22cdf77849e2e273566c877f95311e691b 100644 --- a/env/Lib/site-packages/pip/_vendor/urllib3/contrib/appengine.py +++ b/env/Lib/site-packages/pip/_vendor/urllib3/contrib/appengine.py @@ -111,7 +111,7 @@ class AppEngineManager(RequestMethods): warnings.warn( "urllib3 is using URLFetch on Google App Engine sandbox instead " "of sockets. To use sockets directly instead of URLFetch see " - "https://urllib3.readthedocs.io/en/latest/reference/urllib3.contrib.html.", + "https://urllib3.readthedocs.io/en/1.26.x/reference/urllib3.contrib.html.", AppEnginePlatformWarning, ) @@ -224,7 +224,7 @@ class AppEngineManager(RequestMethods): ) # Check if we should retry the HTTP response. 
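# (The edit below reads Retry-After through the response.headers mapping
# rather than the legacy httplib-style getheader() accessor; both return
# the same value, but the mapping API is the one urllib3 retains going
# forward.)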
- has_retry_after = bool(http_response.getheader("Retry-After")) + has_retry_after = bool(http_response.headers.get("Retry-After")) if retries.is_retry(method, http_response.status, has_retry_after): retries = retries.increment(method, url, response=http_response, _pool=self) log.debug("Retry: %s", url) diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/contrib/ntlmpool.py b/env/Lib/site-packages/pip/_vendor/urllib3/contrib/ntlmpool.py index b2df45dcf6065647b2a8ec87d0c3c8fed475dac1..471665754e9f199f07f90107ebb350c38b378100 100644 --- a/env/Lib/site-packages/pip/_vendor/urllib3/contrib/ntlmpool.py +++ b/env/Lib/site-packages/pip/_vendor/urllib3/contrib/ntlmpool.py @@ -5,6 +5,7 @@ Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10 """ from __future__ import absolute_import +import warnings from logging import getLogger from ntlm import ntlm @@ -12,6 +13,14 @@ from ntlm import ntlm from .. import HTTPSConnectionPool from ..packages.six.moves.http_client import HTTPSConnection +warnings.warn( + "The 'urllib3.contrib.ntlmpool' module is deprecated and will be removed " + "in urllib3 v2.0 release, urllib3 is not able to support it properly due " + "to reasons listed in issue: https://github.com/urllib3/urllib3/issues/2282. " + "If you are a user of this module please comment in the mentioned issue.", + DeprecationWarning, +) + log = getLogger(__name__) @@ -60,7 +69,7 @@ class NTLMConnectionPool(HTTPSConnectionPool): log.debug("Request headers: %s", headers) conn.request("GET", self.authurl, None, headers) res = conn.getresponse() - reshdr = dict(res.getheaders()) + reshdr = dict(res.headers) log.debug("Response status: %s %s", res.status, res.reason) log.debug("Response headers: %s", reshdr) log.debug("Response data: %s [...]", res.read(100)) @@ -92,7 +101,7 @@ class NTLMConnectionPool(HTTPSConnectionPool): conn.request("GET", self.authurl, None, headers) res = conn.getresponse() log.debug("Response status: %s %s", res.status, res.reason) - log.debug("Response headers: %s", dict(res.getheaders())) + log.debug("Response headers: %s", dict(res.headers)) log.debug("Response data: %s [...]", res.read()[:100]) if res.status != 200: if res.status == 401: diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py b/env/Lib/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py index bc5c114fa7e76448d8afec7dda92b876ff84b18b..19e4aa97cc138e4bd39bebf6c49ff1955cb00437 100644 --- a/env/Lib/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py +++ b/env/Lib/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py @@ -28,8 +28,8 @@ like this: .. code-block:: python try: - import urllib3.contrib.pyopenssl - urllib3.contrib.pyopenssl.inject_into_urllib3() + import pip._vendor.urllib3.contrib.pyopenssl as pyopenssl + pyopenssl.inject_into_urllib3() except ImportError: pass @@ -47,10 +47,10 @@ compression in Python 2 (see `CRIME attack`_). """ from __future__ import absolute_import +import OpenSSL.crypto import OpenSSL.SSL from cryptography import x509 from cryptography.hazmat.backends.openssl import backend as openssl_backend -from cryptography.hazmat.backends.openssl.x509 import _Certificate try: from cryptography.x509 import UnsupportedExtension @@ -73,9 +73,19 @@ except ImportError: # Platform-specific: Python 3 import logging import ssl import sys +import warnings from .. 
import util from ..packages import six +from ..util.ssl_ import PROTOCOL_TLS_CLIENT + +warnings.warn( + "'urllib3.contrib.pyopenssl' module is deprecated and will be removed " + "in a future release of urllib3 2.x. Read more in this issue: " + "https://github.com/urllib3/urllib3/issues/2680", + category=DeprecationWarning, + stacklevel=2, +) __all__ = ["inject_into_urllib3", "extract_from_urllib3"] @@ -85,6 +95,7 @@ HAS_SNI = True # Map from urllib3 to PyOpenSSL compatible parameter-values. _openssl_versions = { util.PROTOCOL_TLS: OpenSSL.SSL.SSLv23_METHOD, + PROTOCOL_TLS_CLIENT: OpenSSL.SSL.SSLv23_METHOD, ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD, } @@ -217,9 +228,8 @@ def get_subj_alt_name(peer_cert): if hasattr(peer_cert, "to_cryptography"): cert = peer_cert.to_cryptography() else: - # This is technically using private APIs, but should work across all - # relevant versions before PyOpenSSL got a proper API for this. - cert = _Certificate(openssl_backend, peer_cert._x509) + der = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_ASN1, peer_cert) + cert = x509.load_der_x509_certificate(der, openssl_backend) # We want to find the SAN extension. Ask Cryptography to locate it (it's # faster than looping in Python) @@ -404,7 +414,6 @@ if _fileobject: # Platform-specific: Python 2 self._makefile_refs += 1 return _fileobject(self, mode, bufsize, close=True) - else: # Platform-specific: Python 3 makefile = backport_makefile diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/contrib/securetransport.py b/env/Lib/site-packages/pip/_vendor/urllib3/contrib/securetransport.py index 8f058f5070b9d8ca7a366534c7dedcf172a90969..4a06bc69d5c850fa9f7c4861bc6b3acca3905056 100644 --- a/env/Lib/site-packages/pip/_vendor/urllib3/contrib/securetransport.py +++ b/env/Lib/site-packages/pip/_vendor/urllib3/contrib/securetransport.py @@ -19,8 +19,8 @@ contrib module. So...here we are. To use this module, simply import and inject it:: - import urllib3.contrib.securetransport - urllib3.contrib.securetransport.inject_into_urllib3() + import pip._vendor.urllib3.contrib.securetransport as securetransport + securetransport.inject_into_urllib3() Happy TLSing! @@ -67,6 +67,7 @@ import weakref from pip._vendor import six from .. import util +from ..util.ssl_ import PROTOCOL_TLS_CLIENT from ._securetransport.bindings import CoreFoundation, Security, SecurityConst from ._securetransport.low_level import ( _assert_no_error, @@ -154,7 +155,8 @@ CIPHER_SUITES = [ # TLSv1 and a high of TLSv1.2. For everything else, we pin to that version. 
# TLSv1 to 1.2 are supported on macOS 10.8+ _protocol_to_min_max = { - util.PROTOCOL_TLS: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12) + util.PROTOCOL_TLS: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12), + PROTOCOL_TLS_CLIENT: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12), } if hasattr(ssl, "PROTOCOL_SSLv2"): @@ -768,7 +770,6 @@ if _fileobject: # Platform-specific: Python 2 self._makefile_refs += 1 return _fileobject(self, mode, bufsize, close=True) - else: # Platform-specific: Python 3 def makefile(self, mode="r", buffering=None, *args, **kwargs): diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/contrib/socks.py b/env/Lib/site-packages/pip/_vendor/urllib3/contrib/socks.py index 93df8325d59c41d7496c322460018fd513468dec..c326e80dd117458ff6e71741ca57359629b05ae4 100644 --- a/env/Lib/site-packages/pip/_vendor/urllib3/contrib/socks.py +++ b/env/Lib/site-packages/pip/_vendor/urllib3/contrib/socks.py @@ -51,7 +51,7 @@ except ImportError: ( "SOCKS support in urllib3 requires the installation of optional " "dependencies: specifically, PySocks. For more information, see " - "https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies" + "https://urllib3.readthedocs.io/en/1.26.x/contrib.html#socks-proxies" ), DependencyWarning, ) diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/packages/__init__.py b/env/Lib/site-packages/pip/_vendor/urllib3/packages/__init__.py index fce4caa65d2eeba92acc20ab31097cb606d6c412..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644 --- a/env/Lib/site-packages/pip/_vendor/urllib3/packages/__init__.py +++ b/env/Lib/site-packages/pip/_vendor/urllib3/packages/__init__.py @@ -1,5 +0,0 @@ -from __future__ import absolute_import - -from . import ssl_match_hostname - -__all__ = ("ssl_match_hostname",) diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/packages/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/packages/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 02aa51fa15e346b938ec39b1ff8d34076c0ab5e2..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/packages/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/packages/__pycache__/six.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/packages/__pycache__/six.cpython-39.pyc deleted file mode 100644 index a62240feffc825fce5986b0c7e43ef59b2e74fa2..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/packages/__pycache__/six.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 801ef48ec8379dbb973b0eef1967c6e3c3f0bfdb..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/makefile.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/makefile.cpython-39.pyc deleted file mode 100644 index 51dce1992154739169f3e848cbcd7c28112c0c8b..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/makefile.cpython-39.pyc and /dev/null differ diff 
--git a/env/Lib/site-packages/pip/_vendor/urllib3/packages/six.py b/env/Lib/site-packages/pip/_vendor/urllib3/packages/six.py index 314424099f6240c4679d3b62685e4a38fe900449..f099a3dcd28d2fec21457c9b6c01ded4e3e9ddee 100644 --- a/env/Lib/site-packages/pip/_vendor/urllib3/packages/six.py +++ b/env/Lib/site-packages/pip/_vendor/urllib3/packages/six.py @@ -1,4 +1,4 @@ -# Copyright (c) 2010-2019 Benjamin Peterson +# Copyright (c) 2010-2020 Benjamin Peterson # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal @@ -29,7 +29,7 @@ import sys import types __author__ = "Benjamin Peterson <benjamin@python.org>" -__version__ = "1.12.0" +__version__ = "1.16.0" # Useful for very coarse version differentiation. @@ -71,6 +71,11 @@ else: MAXSIZE = int((1 << 63) - 1) del X +if PY34: + from importlib.util import spec_from_loader +else: + spec_from_loader = None + def _add_doc(func, doc): """Add documentation to a function.""" @@ -182,6 +187,11 @@ class _SixMetaPathImporter(object): return self return None + def find_spec(self, fullname, path, target=None): + if fullname in self.known_modules: + return spec_from_loader(fullname, self) + return None + def __get_module(self, fullname): try: return self.known_modules[fullname] @@ -220,6 +230,12 @@ class _SixMetaPathImporter(object): get_source = get_code # same as get_code + def create_module(self, spec): + return self.load_module(spec.name) + + def exec_module(self, module): + pass + _importer = _SixMetaPathImporter(__name__) @@ -260,9 +276,19 @@ _moved_attributes = [ ), MovedModule("builtins", "__builtin__"), MovedModule("configparser", "ConfigParser"), + MovedModule( + "collections_abc", + "collections", + "collections.abc" if sys.version_info >= (3, 3) else "collections", + ), MovedModule("copyreg", "copy_reg"), MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), - MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), + MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"), + MovedModule( + "_dummy_thread", + "dummy_thread", + "_dummy_thread" if sys.version_info < (3, 9) else "_thread", + ), MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), MovedModule("http_cookies", "Cookie", "http.cookies"), MovedModule("html_entities", "htmlentitydefs", "html.entities"), @@ -307,7 +333,9 @@ _moved_attributes = [ ] # Add windows specific modules. 
if sys.platform == "win32": - _moved_attributes += [MovedModule("winreg", "_winreg")] + _moved_attributes += [ + MovedModule("winreg", "_winreg"), + ] for attr in _moved_attributes: setattr(_MovedItems, attr.name, attr) @@ -476,7 +504,7 @@ class Module_six_moves_urllib_robotparser(_LazyModule): _urllib_robotparser_moved_attributes = [ - MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser") + MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), ] for attr in _urllib_robotparser_moved_attributes: setattr(Module_six_moves_urllib_robotparser, attr.name, attr) @@ -678,9 +706,11 @@ if PY3: if sys.version_info[1] <= 1: _assertRaisesRegex = "assertRaisesRegexp" _assertRegex = "assertRegexpMatches" + _assertNotRegex = "assertNotRegexpMatches" else: _assertRaisesRegex = "assertRaisesRegex" _assertRegex = "assertRegex" + _assertNotRegex = "assertNotRegex" else: def b(s): @@ -707,6 +737,7 @@ else: _assertCountEqual = "assertItemsEqual" _assertRaisesRegex = "assertRaisesRegexp" _assertRegex = "assertRegexpMatches" + _assertNotRegex = "assertNotRegexpMatches" _add_doc(b, """Byte literal""") _add_doc(u, """Text literal""") @@ -723,6 +754,10 @@ def assertRegex(self, *args, **kwargs): return getattr(self, _assertRegex)(*args, **kwargs) +def assertNotRegex(self, *args, **kwargs): + return getattr(self, _assertNotRegex)(*args, **kwargs) + + if PY3: exec_ = getattr(moves.builtins, "exec") @@ -737,7 +772,6 @@ if PY3: value = None tb = None - else: def exec_(_code_, _globs_=None, _locs_=None): @@ -750,7 +784,7 @@ else: del frame elif _locs_ is None: _locs_ = _globs_ - exec("""exec _code_ in _globs_, _locs_""") + exec ("""exec _code_ in _globs_, _locs_""") exec_( """def reraise(tp, value, tb=None): @@ -762,18 +796,7 @@ else: ) -if sys.version_info[:2] == (3, 2): - exec_( - """def raise_from(value, from_value): - try: - if from_value is None: - raise value - raise value from from_value - finally: - value = None -""" - ) -elif sys.version_info[:2] > (3, 2): +if sys.version_info[:2] > (3,): exec_( """def raise_from(value, from_value): try: @@ -863,19 +886,41 @@ if sys.version_info[:2] < (3, 3): _add_doc(reraise, """Reraise an exception.""") if sys.version_info[0:2] < (3, 4): + # This does exactly the same what the :func:`py3:functools.update_wrapper` + # function does on Python versions after 3.2. It sets the ``__wrapped__`` + # attribute on ``wrapper`` object and it doesn't raise an error if any of + # the attributes mentioned in ``assigned`` and ``updated`` are missing on + # ``wrapped`` object. + def _update_wrapper( + wrapper, + wrapped, + assigned=functools.WRAPPER_ASSIGNMENTS, + updated=functools.WRAPPER_UPDATES, + ): + for attr in assigned: + try: + value = getattr(wrapped, attr) + except AttributeError: + continue + else: + setattr(wrapper, attr, value) + for attr in updated: + getattr(wrapper, attr).update(getattr(wrapped, attr, {})) + wrapper.__wrapped__ = wrapped + return wrapper + + _update_wrapper.__doc__ = functools.update_wrapper.__doc__ def wraps( wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, updated=functools.WRAPPER_UPDATES, ): - def wrapper(f): - f = functools.wraps(wrapped, assigned, updated)(f) - f.__wrapped__ = wrapped - return f - - return wrapper + return functools.partial( + _update_wrapper, wrapped=wrapped, assigned=assigned, updated=updated + ) + wraps.__doc__ = functools.wraps.__doc__ else: wraps = functools.wraps @@ -888,7 +933,15 @@ def with_metaclass(meta, *bases): # the actual metaclass. 
class metaclass(type): def __new__(cls, name, this_bases, d): - return meta(name, bases, d) + if sys.version_info[:2] >= (3, 7): + # This version introduced PEP 560 that requires a bit + # of extra care (we mimic what is done by __build_class__). + resolved_bases = types.resolve_bases(bases) + if resolved_bases is not bases: + d["__orig_bases__"] = bases + else: + resolved_bases = bases + return meta(name, resolved_bases, d) @classmethod def __prepare__(cls, name, this_bases): @@ -928,12 +981,11 @@ def ensure_binary(s, encoding="utf-8", errors="strict"): - `str` -> encoded to `bytes` - `bytes` -> `bytes` """ + if isinstance(s, binary_type): + return s if isinstance(s, text_type): return s.encode(encoding, errors) - elif isinstance(s, binary_type): - return s - else: - raise TypeError("not expecting type '%s'" % type(s)) + raise TypeError("not expecting type '%s'" % type(s)) def ensure_str(s, encoding="utf-8", errors="strict"): @@ -947,12 +999,15 @@ def ensure_str(s, encoding="utf-8", errors="strict"): - `str` -> `str` - `bytes` -> decoded to `str` """ - if not isinstance(s, (text_type, binary_type)): - raise TypeError("not expecting type '%s'" % type(s)) + # Optimization: Fast return for the common case. + if type(s) is str: + return s if PY2 and isinstance(s, text_type): - s = s.encode(encoding, errors) + return s.encode(encoding, errors) elif PY3 and isinstance(s, binary_type): - s = s.decode(encoding, errors) + return s.decode(encoding, errors) + elif not isinstance(s, (text_type, binary_type)): + raise TypeError("not expecting type '%s'" % type(s)) return s @@ -977,7 +1032,7 @@ def ensure_text(s, encoding="utf-8", errors="strict"): def python_2_unicode_compatible(klass): """ - A decorator that defines __unicode__ and __str__ methods under Python 2. + A class decorator that defines __unicode__ and __str__ methods under Python 2. Under Python 3 it does nothing. To support Python 2 and 3 with a single code base, define a __str__ method diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/__init__.py b/env/Lib/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/__init__.py deleted file mode 100644 index 6b12fd90aadeccacca096d16836c4d80d498cf58..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -import sys - -try: - # Our match_hostname function is the same as 3.5's, so we only want to - # import the match_hostname function if it's at least that good. - if sys.version_info < (3, 5): - raise ImportError("Fallback to vendored code") - - from ssl import CertificateError, match_hostname -except ImportError: - try: - # Backport of the function from a pypi module - from backports.ssl_match_hostname import ( # type: ignore - CertificateError, - match_hostname, - ) - except ImportError: - # Our vendored copy - from ._implementation import CertificateError, match_hostname # type: ignore - -# Not needed, but documenting what we provide. 
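The with_metaclass() change above mirrors what __build_class__ does for PEP 560: objects that are not classes can still inject real bases through __mro_entries__, and bare type() cannot resolve them. A quick illustration (the _PseudoBase name is hypothetical, not six code):

import types

class _Base(object):
    pass

class _PseudoBase(object):
    """Not a real base class; supplies one via PEP 560."""
    def __mro_entries__(self, bases):
        return (_Base,)

pseudo = _PseudoBase()
bases = (pseudo,)

# type("Box", bases, {}) would raise TypeError on 3.7+, so mimic
# __build_class__: resolve the bases and record the originals.
resolved = types.resolve_bases(bases)  # -> (_Base,)
namespace = {}
if resolved is not bases:
    namespace["__orig_bases__"] = bases
Box = type("Box", resolved, namespace)
assert Box.__bases__ == (_Base,)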
-__all__ = ("CertificateError", "match_hostname") diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 4c8deda5a4c951151d1e1a6c9be59dfe23c4c621..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/__pycache__/_implementation.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/__pycache__/_implementation.cpython-39.pyc deleted file mode 100644 index d3d2d92d3417ed330295530a81a606b993b2cac4..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/__pycache__/_implementation.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/_implementation.py b/env/Lib/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/_implementation.py deleted file mode 100644 index 689208d3c63f1318e3a9a084a90e6b4532fa49bb..0000000000000000000000000000000000000000 --- a/env/Lib/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/_implementation.py +++ /dev/null @@ -1,160 +0,0 @@ -"""The match_hostname() function from Python 3.3.3, essential when using SSL.""" - -# Note: This file is under the PSF license as the code comes from the python -# stdlib. http://docs.python.org/3/license.html - -import re -import sys - -# ipaddress has been backported to 2.6+ in pypi. If it is installed on the -# system, use it to handle IPAddress ServerAltnames (this was added in -# python-3.5) otherwise only do DNS matching. This allows -# backports.ssl_match_hostname to continue to be used in Python 2.7. -try: - import ipaddress -except ImportError: - ipaddress = None - -__version__ = "3.5.0.1" - - -class CertificateError(ValueError): - pass - - -def _dnsname_match(dn, hostname, max_wildcards=1): - """Matching according to RFC 6125, section 6.4.3 - - http://tools.ietf.org/html/rfc6125#section-6.4.3 - """ - pats = [] - if not dn: - return False - - # Ported from python3-syntax: - # leftmost, *remainder = dn.split(r'.') - parts = dn.split(r".") - leftmost = parts[0] - remainder = parts[1:] - - wildcards = leftmost.count("*") - if wildcards > max_wildcards: - # Issue #17980: avoid denials of service by refusing more - # than one wildcard per fragment. A survey of established - # policy among SSL implementations showed it to be a - # reasonable choice. - raise CertificateError( - "too many wildcards in certificate DNS name: " + repr(dn) - ) - - # speed up common case w/o wildcards - if not wildcards: - return dn.lower() == hostname.lower() - - # RFC 6125, section 6.4.3, subitem 1. - # The client SHOULD NOT attempt to match a presented identifier in which - # the wildcard character comprises a label other than the left-most label. - if leftmost == "*": - # When '*' is a fragment by itself, it matches a non-empty dotless - # fragment. - pats.append("[^.]+") - elif leftmost.startswith("xn--") or hostname.startswith("xn--"): - # RFC 6125, section 6.4.3, subitem 3. - # The client SHOULD NOT attempt to match a presented identifier - # where the wildcard character is embedded within an A-label or - # U-label of an internationalized domain name. 
- pats.append(re.escape(leftmost)) - else: - # Otherwise, '*' matches any dotless string, e.g. www* - pats.append(re.escape(leftmost).replace(r"\*", "[^.]*")) - - # add the remaining fragments, ignore any wildcards - for frag in remainder: - pats.append(re.escape(frag)) - - pat = re.compile(r"\A" + r"\.".join(pats) + r"\Z", re.IGNORECASE) - return pat.match(hostname) - - -def _to_unicode(obj): - if isinstance(obj, str) and sys.version_info < (3,): - obj = unicode(obj, encoding="ascii", errors="strict") - return obj - - -def _ipaddress_match(ipname, host_ip): - """Exact matching of IP addresses. - - RFC 6125 explicitly doesn't define an algorithm for this - (section 1.7.2 - "Out of Scope"). - """ - # OpenSSL may add a trailing newline to a subjectAltName's IP address - # Divergence from upstream: ipaddress can't handle byte str - ip = ipaddress.ip_address(_to_unicode(ipname).rstrip()) - return ip == host_ip - - -def match_hostname(cert, hostname): - """Verify that *cert* (in decoded format as returned by - SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 - rules are followed, but IP addresses are not accepted for *hostname*. - - CertificateError is raised on failure. On success, the function - returns nothing. - """ - if not cert: - raise ValueError( - "empty or no certificate, match_hostname needs a " - "SSL socket or SSL context with either " - "CERT_OPTIONAL or CERT_REQUIRED" - ) - try: - # Divergence from upstream: ipaddress can't handle byte str - host_ip = ipaddress.ip_address(_to_unicode(hostname)) - except ValueError: - # Not an IP address (common case) - host_ip = None - except UnicodeError: - # Divergence from upstream: Have to deal with ipaddress not taking - # byte strings. addresses should be all ascii, so we consider it not - # an ipaddress in this case - host_ip = None - except AttributeError: - # Divergence from upstream: Make ipaddress library optional - if ipaddress is None: - host_ip = None - else: - raise - dnsnames = [] - san = cert.get("subjectAltName", ()) - for key, value in san: - if key == "DNS": - if host_ip is None and _dnsname_match(value, hostname): - return - dnsnames.append(value) - elif key == "IP Address": - if host_ip is not None and _ipaddress_match(value, host_ip): - return - dnsnames.append(value) - if not dnsnames: - # The subject is only checked when there is no dNSName entry - # in subjectAltName - for sub in cert.get("subject", ()): - for key, value in sub: - # XXX according to RFC 2818, the most specific Common Name - # must be used. 
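For reference, the deleted _dnsname_match() compiles one regex per certificate name. A compact sketch of the same RFC 6125 translation (not the vendored function itself): '*' in the left-most label becomes '[^.]*', every other label matches literally, and more than one wildcard is refused.

import re

def dnsname_pattern(dn, max_wildcards=1):
    leftmost, _, remainder = dn.partition(".")
    if leftmost.count("*") > max_wildcards:
        # refuse pathological names (denial-of-service guard)
        raise ValueError("too many wildcards in %r" % dn)
    if leftmost == "*":
        pats = ["[^.]+"]  # a bare '*' must match a non-empty label
    else:
        pats = [re.escape(leftmost).replace(r"\*", "[^.]*")]
    pats += [re.escape(frag) for frag in remainder.split(".") if frag]
    return re.compile(r"\A" + r"\.".join(pats) + r"\Z", re.IGNORECASE)

assert dnsname_pattern("*.example.com").match("www.example.com")
assert not dnsname_pattern("*.example.com").match("a.b.example.com")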
- if key == "commonName": - if _dnsname_match(value, hostname): - return - dnsnames.append(value) - if len(dnsnames) > 1: - raise CertificateError( - "hostname %r " - "doesn't match either of %s" % (hostname, ", ".join(map(repr, dnsnames))) - ) - elif len(dnsnames) == 1: - raise CertificateError("hostname %r doesn't match %r" % (hostname, dnsnames[0])) - else: - raise CertificateError( - "no appropriate commonName or subjectAltName fields were found" - ) diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/poolmanager.py b/env/Lib/site-packages/pip/_vendor/urllib3/poolmanager.py index 3a31a285bf64816fba65d2cb76b7aea66abaf534..14b10daf3a96229be87eed34c643e962a0d30450 100644 --- a/env/Lib/site-packages/pip/_vendor/urllib3/poolmanager.py +++ b/env/Lib/site-packages/pip/_vendor/urllib3/poolmanager.py @@ -34,6 +34,7 @@ SSL_KEYWORDS = ( "ca_cert_dir", "ssl_context", "key_password", + "server_hostname", ) # All known keyword arguments that could be provided to the pool manager, its @@ -170,7 +171,7 @@ class PoolManager(RequestMethods): def __init__(self, num_pools=10, headers=None, **connection_pool_kw): RequestMethods.__init__(self, headers) self.connection_pool_kw = connection_pool_kw - self.pools = RecentlyUsedContainer(num_pools, dispose_func=lambda p: p.close()) + self.pools = RecentlyUsedContainer(num_pools) # Locally set the pool classes and keys so other PoolManagers can # override them. diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/response.py b/env/Lib/site-packages/pip/_vendor/urllib3/response.py index 38693f4fc6e33766f7a6b4f1227867ae86d2da32..8909f8454e94752d188ed13cf36c35f93fc6c3f2 100644 --- a/env/Lib/site-packages/pip/_vendor/urllib3/response.py +++ b/env/Lib/site-packages/pip/_vendor/urllib3/response.py @@ -2,16 +2,16 @@ from __future__ import absolute_import import io import logging +import sys +import warnings import zlib from contextlib import contextmanager from socket import error as SocketError from socket import timeout as SocketTimeout -try: - import brotli -except ImportError: - brotli = None +brotli = None +from . import util from ._collections import HTTPHeaderDict from .connection import BaseSSLError, HTTPException from .exceptions import ( @@ -478,6 +478,54 @@ class HTTPResponse(io.IOBase): if self._original_response and self._original_response.isclosed(): self.release_conn() + def _fp_read(self, amt): + """ + Read a response with the thought that reading the number of bytes + larger than can fit in a 32-bit int at a time via SSL in some + known cases leads to an overflow error that has to be prevented + if `amt` or `self.length_remaining` indicate that a problem may + happen. + + The known cases: + * 3.8 <= CPython < 3.9.7 because of a bug + https://github.com/urllib3/urllib3/issues/2513#issuecomment-1152559900. + * urllib3 injected with pyOpenSSL-backed SSL-support. + * CPython < 3.10 only when `amt` does not fit 32-bit int. + """ + assert self._fp + c_int_max = 2 ** 31 - 1 + if ( + ( + (amt and amt > c_int_max) + or (self.length_remaining and self.length_remaining > c_int_max) + ) + and not util.IS_SECURETRANSPORT + and (util.IS_PYOPENSSL or sys.version_info < (3, 10)) + ): + buffer = io.BytesIO() + # Besides `max_chunk_amt` being a maximum chunk size, it + # affects memory overhead of reading a response by this + # method in CPython. + # `c_int_max` equal to 2 GiB - 1 byte is the actual maximum + # chunk size that does not lead to an overflow error, but + # 256 MiB is a compromise. 
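The _fp_read() hunk continues below; as a standalone sketch, the capped-chunk strategy its docstring describes looks like this (never ask the SSL layer for more than max_chunk_amt bytes per call, stitching the pieces together in a BytesIO):

import io

def read_capped(fp, amt=None, max_chunk_amt=2 ** 28):  # 256 MiB cap
    buffer = io.BytesIO()
    while amt is None or amt != 0:
        if amt is not None:
            chunk_amt = min(amt, max_chunk_amt)
            amt -= chunk_amt
        else:
            chunk_amt = max_chunk_amt
        data = fp.read(chunk_amt)
        if not data:
            break
        buffer.write(data)
        del data  # keep peak memory near one chunk
    return buffer.getvalue()

# e.g. read_capped(io.BytesIO(b"x" * 10), 4) == b"xxxx"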
+ max_chunk_amt = 2 ** 28 + while amt is None or amt != 0: + if amt is not None: + chunk_amt = min(amt, max_chunk_amt) + amt -= chunk_amt + else: + chunk_amt = max_chunk_amt + data = self._fp.read(chunk_amt) + if not data: + break + buffer.write(data) + del data # to reduce peak memory usage by `max_chunk_amt`. + return buffer.getvalue() + else: + # StringIO doesn't like amt=None + return self._fp.read(amt) if amt is not None else self._fp.read() + def read(self, amt=None, decode_content=None, cache_content=False): """ Similar to :meth:`http.client.HTTPResponse.read`, but with two additional @@ -510,13 +558,11 @@ class HTTPResponse(io.IOBase): fp_closed = getattr(self._fp, "closed", False) with self._error_catcher(): + data = self._fp_read(amt) if not fp_closed else b"" if amt is None: - # cStringIO doesn't like amt=None - data = self._fp.read() if not fp_closed else b"" flush_decoder = True else: cache_content = False - data = self._fp.read(amt) if not fp_closed else b"" if ( amt != 0 and not data ): # Platform-specific: Buggy versions of Python. @@ -612,9 +658,21 @@ class HTTPResponse(io.IOBase): # Backwards-compatibility methods for http.client.HTTPResponse def getheaders(self): + warnings.warn( + "HTTPResponse.getheaders() is deprecated and will be removed " + "in urllib3 v2.1.0. Instead access HTTPResponse.headers directly.", + category=DeprecationWarning, + stacklevel=2, + ) return self.headers def getheader(self, name, default=None): + warnings.warn( + "HTTPResponse.getheader() is deprecated and will be removed " + "in urllib3 v2.1.0. Instead use HTTPResponse.headers.get(name, default).", + category=DeprecationWarning, + stacklevel=2, + ) return self.headers.get(name, default) # Backwards compatibility for http.cookiejar diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 4b38bab0daaa82c515278469603046675949dc42..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/connection.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/connection.cpython-39.pyc deleted file mode 100644 index 36f120ff5056d4d4a8dac8cbebf80bbccb921c3f..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/connection.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/proxy.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/proxy.cpython-39.pyc deleted file mode 100644 index 476231a2cf4013ef8d11179c6c76b5d4a106cab7..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/proxy.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/queue.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/queue.cpython-39.pyc deleted file mode 100644 index ad5b7f43da3c90bad6d8062c2995ccb02e817cf4..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/queue.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/request.cpython-39.pyc 
b/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/request.cpython-39.pyc deleted file mode 100644 index 98be7c28afb8b1f4d975ff29946c63fb10233bad..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/request.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/response.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/response.cpython-39.pyc deleted file mode 100644 index c074f5247b59fc475cbe9b46f44b6bb5c9fcea87..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/response.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/retry.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/retry.cpython-39.pyc deleted file mode 100644 index 0cd5e28e0363729af88574bbefc54ec4514fb3dd..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/retry.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/ssl_.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/ssl_.cpython-39.pyc deleted file mode 100644 index d042504c017a559495708b2f2d1f32f85b4dcb78..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/ssl_.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/ssltransport.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/ssltransport.cpython-39.pyc deleted file mode 100644 index 218adca57277a39ada6f19f46a67f7b701b337e9..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/ssltransport.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/timeout.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/timeout.cpython-39.pyc deleted file mode 100644 index 32587903fb663fc956d83adf48903c45aa24c356..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/timeout.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/url.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/url.cpython-39.pyc deleted file mode 100644 index 0a4b62b6136e95016e31300d89d9fefd9ae3ab9e..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/url.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/wait.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/wait.cpython-39.pyc deleted file mode 100644 index 32922559841fde664bda32b4ee90639fdca557c7..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/urllib3/util/__pycache__/wait.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/util/connection.py b/env/Lib/site-packages/pip/_vendor/urllib3/util/connection.py index f1e5d37f88fbf4ae53f266791aaae73dc88d5727..6af1138f260e4eaaa0aa242f7f50b918a283b49f 100644 --- a/env/Lib/site-packages/pip/_vendor/urllib3/util/connection.py +++ b/env/Lib/site-packages/pip/_vendor/urllib3/util/connection.py @@ -2,9 +2,8 
@@ from __future__ import absolute_import import socket -from pip._vendor.urllib3.exceptions import LocationParseError - from ..contrib import _appengine_environ +from ..exceptions import LocationParseError from ..packages import six from .wait import NoWayToWaitForSocketError, wait_for_read @@ -118,7 +117,7 @@ def allowed_gai_family(): def _has_ipv6(host): - """ Returns True if the system can bind an IPv6 address. """ + """Returns True if the system can bind an IPv6 address.""" sock = None has_ipv6 = False diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/util/proxy.py b/env/Lib/site-packages/pip/_vendor/urllib3/util/proxy.py index 34f884d5b314dbb28abd2af3ce159630f6028180..2199cc7b7f004009493d032720c36d6568f9d89e 100644 --- a/env/Lib/site-packages/pip/_vendor/urllib3/util/proxy.py +++ b/env/Lib/site-packages/pip/_vendor/urllib3/util/proxy.py @@ -45,6 +45,7 @@ def create_proxy_ssl_context( ssl_version=resolve_ssl_version(ssl_version), cert_reqs=resolve_cert_reqs(cert_reqs), ) + if ( not ca_certs and not ca_cert_dir diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/util/request.py b/env/Lib/site-packages/pip/_vendor/urllib3/util/request.py index 25103383ec7abc7b46fb6a6f549efa38e4abe24c..330766ef4f3403e05a6ad8ec30f25fe05fdbc199 100644 --- a/env/Lib/site-packages/pip/_vendor/urllib3/util/request.py +++ b/env/Lib/site-packages/pip/_vendor/urllib3/util/request.py @@ -13,12 +13,6 @@ SKIP_HEADER = "@@@SKIP_HEADER@@@" SKIPPABLE_HEADERS = frozenset(["accept-encoding", "host", "user-agent"]) ACCEPT_ENCODING = "gzip,deflate" -try: - import brotli as _unused_module_brotli # noqa: F401 -except ImportError: - pass -else: - ACCEPT_ENCODING += ",br" _FAILEDTELL = object() diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/util/retry.py b/env/Lib/site-packages/pip/_vendor/urllib3/util/retry.py index d25a41b42ea43ae9261c5444bcd56b10641b4fa9..2490d5e5b63359a7f826922dc69c0015cb9a5b2e 100644 --- a/env/Lib/site-packages/pip/_vendor/urllib3/util/retry.py +++ b/env/Lib/site-packages/pip/_vendor/urllib3/util/retry.py @@ -37,7 +37,7 @@ class _RetryMeta(type): def DEFAULT_METHOD_WHITELIST(cls): warnings.warn( "Using 'Retry.DEFAULT_METHOD_WHITELIST' is deprecated and " - "will be removed in v2.0. Use 'Retry.DEFAULT_METHODS_ALLOWED' instead", + "will be removed in v2.0. Use 'Retry.DEFAULT_ALLOWED_METHODS' instead", DeprecationWarning, ) return cls.DEFAULT_ALLOWED_METHODS @@ -69,6 +69,24 @@ class _RetryMeta(type): ) cls.DEFAULT_REMOVE_HEADERS_ON_REDIRECT = value + @property + def BACKOFF_MAX(cls): + warnings.warn( + "Using 'Retry.BACKOFF_MAX' is deprecated and " + "will be removed in v2.0. Use 'Retry.DEFAULT_BACKOFF_MAX' instead", + DeprecationWarning, + ) + return cls.DEFAULT_BACKOFF_MAX + + @BACKOFF_MAX.setter + def BACKOFF_MAX(cls, value): + warnings.warn( + "Using 'Retry.BACKOFF_MAX' is deprecated and " + "will be removed in v2.0. Use 'Retry.DEFAULT_BACKOFF_MAX' instead", + DeprecationWarning, + ) + cls.DEFAULT_BACKOFF_MAX = value + @six.add_metaclass(_RetryMeta) class Retry(object): @@ -181,7 +199,7 @@ class Retry(object): seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer - than :attr:`Retry.BACKOFF_MAX`. + than :attr:`Retry.DEFAULT_BACKOFF_MAX`. By default, backoff is disabled (set to 0). @@ -220,7 +238,7 @@ class Retry(object): DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset(["Authorization"]) #: Maximum backoff time. 
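The retry.py hunk keeps the old Retry.BACKOFF_MAX spelling alive through properties on the metaclass, so plain class-attribute access can emit a DeprecationWarning. The pattern in isolation, with hypothetical OLD_NAME/NEW_NAME attributes:

import warnings

class _Meta(type):
    @property
    def OLD_NAME(cls):
        warnings.warn("Use 'NEW_NAME' instead", DeprecationWarning)
        return cls.NEW_NAME

    @OLD_NAME.setter
    def OLD_NAME(cls, value):
        warnings.warn("Use 'NEW_NAME' instead", DeprecationWarning)
        cls.NEW_NAME = value

class Config(metaclass=_Meta):
    NEW_NAME = 120

print(Config.OLD_NAME)  # 120, plus a DeprecationWarning
Config.OLD_NAME = 60    # routed to NEW_NAME via the metaclass setter
assert Config.NEW_NAME == 60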
- BACKOFF_MAX = 120 + DEFAULT_BACKOFF_MAX = 120 def __init__( self, @@ -321,7 +339,7 @@ class Retry(object): @classmethod def from_int(cls, retries, redirect=True, default=None): - """ Backwards-compatibility for the old retries format.""" + """Backwards-compatibility for the old retries format.""" if retries is None: retries = default if default is not None else cls.DEFAULT @@ -348,7 +366,7 @@ class Retry(object): return 0 backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1)) - return min(self.BACKOFF_MAX, backoff_value) + return min(self.DEFAULT_BACKOFF_MAX, backoff_value) def parse_retry_after(self, retry_after): # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4 @@ -374,9 +392,9 @@ class Retry(object): return seconds def get_retry_after(self, response): - """ Get the value of Retry-After in seconds. """ + """Get the value of Retry-After in seconds.""" - retry_after = response.getheader("Retry-After") + retry_after = response.headers.get("Retry-After") if retry_after is None: return None @@ -468,7 +486,7 @@ class Retry(object): ) def is_exhausted(self): - """ Are we out of retries? """ + """Are we out of retries?""" retry_counts = ( self.total, self.connect, diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/util/ssl_.py b/env/Lib/site-packages/pip/_vendor/urllib3/util/ssl_.py index 763da82bb66b1c3c157646af522880c2fe58103b..2b45d391d4d7398e4769f45f9dd25eb55daef437 100644 --- a/env/Lib/site-packages/pip/_vendor/urllib3/util/ssl_.py +++ b/env/Lib/site-packages/pip/_vendor/urllib3/util/ssl_.py @@ -71,6 +71,11 @@ except ImportError: except ImportError: PROTOCOL_SSLv23 = PROTOCOL_TLS = 2 +try: + from ssl import PROTOCOL_TLS_CLIENT +except ImportError: + PROTOCOL_TLS_CLIENT = PROTOCOL_TLS + try: from ssl import OP_NO_COMPRESSION, OP_NO_SSLv2, OP_NO_SSLv3 @@ -159,7 +164,7 @@ except ImportError: "urllib3 from configuring SSL appropriately and may cause " "certain SSL connections to fail. You can upgrade to a newer " "version of Python to solve this. For more information, see " - "https://urllib3.readthedocs.io/en/latest/advanced-usage.html" + "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html" "#ssl-warnings", InsecurePlatformWarning, ) @@ -278,7 +283,11 @@ def create_urllib3_context( Constructed SSLContext object with specified options :rtype: SSLContext """ - context = SSLContext(ssl_version or PROTOCOL_TLS) + # PROTOCOL_TLS is deprecated in Python 3.10 + if not ssl_version or ssl_version == PROTOCOL_TLS: + ssl_version = PROTOCOL_TLS_CLIENT + + context = SSLContext(ssl_version) context.set_ciphers(ciphers or DEFAULT_CIPHERS) @@ -313,13 +322,25 @@ def create_urllib3_context( ) is not None: context.post_handshake_auth = True - context.verify_mode = cert_reqs - if ( - getattr(context, "check_hostname", None) is not None - ): # Platform-specific: Python 3.2 - # We do our own verification, including fingerprints and alternative - # hostnames. So disable it here - context.check_hostname = False + def disable_check_hostname(): + if ( + getattr(context, "check_hostname", None) is not None + ): # Platform-specific: Python 3.2 + # We do our own verification, including fingerprints and alternative + # hostnames. So disable it here + context.check_hostname = False + + # The order of the below lines setting verify_mode and check_hostname + # matter due to safe-guards SSLContext has to prevent an SSLContext with + # check_hostname=True, verify_mode=NONE/OPTIONAL. 
This is made even more +# complex because we don't know whether PROTOCOL_TLS_CLIENT will be used +# or not so we don't know the initial state of the freshly created SSLContext. + if cert_reqs == ssl.CERT_REQUIRED: + context.verify_mode = cert_reqs + disable_check_hostname() + else: + disable_check_hostname() + context.verify_mode = cert_reqs # Enable logging of TLS session keys via defacto standard environment variable # 'SSLKEYLOGFILE', if the feature is available (Python 3.8+). Skip empty values. @@ -401,7 +422,7 @@ def ssl_wrap_socket( try: if hasattr(context, "set_alpn_protocols"): context.set_alpn_protocols(ALPN_PROTOCOLS) - except NotImplementedError: + except NotImplementedError: # Defensive: in CI, we always have set_alpn_protocols pass # If we detect server_hostname is an IP address then the SNI @@ -419,7 +440,7 @@ "This may cause the server to present an incorrect TLS " "certificate, which can cause validation failures. You can upgrade to " "a newer version of Python to solve this. For more information, see " - "https://urllib3.readthedocs.io/en/latest/advanced-usage.html" + "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html" "#ssl-warnings", SNIMissingWarning, ) diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/util/ssltransport.py b/env/Lib/site-packages/pip/_vendor/urllib3/util/ssltransport.py index ca00233c93175460ae35c1c1d68cc6f5d98f003f..4a7105d17916a7237f3df6e59d65ca82375f8803 100644 --- a/env/Lib/site-packages/pip/_vendor/urllib3/util/ssltransport.py +++ b/env/Lib/site-packages/pip/_vendor/urllib3/util/ssltransport.py @@ -2,8 +2,8 @@ import io import socket import ssl -from pip._vendor.urllib3.exceptions import ProxySchemeUnsupported -from pip._vendor.urllib3.packages import six +from ..exceptions import ProxySchemeUnsupported +from ..packages import six SSL_BLOCKSIZE = 16384 @@ -193,7 +193,7 @@ class SSLTransport: raise def _ssl_io_loop(self, func, *args): - """ Performs an I/O loop between incoming/outgoing and the socket.""" + """Performs an I/O loop between incoming/outgoing and the socket.""" should_loop = True ret = None diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/util/timeout.py b/env/Lib/site-packages/pip/_vendor/urllib3/util/timeout.py index ff69593b05b5eb5fcd336b4bd16193c44dc48ef5..78e18a6272482e3946de83c0274badc4a5cfcdfa 100644 --- a/env/Lib/site-packages/pip/_vendor/urllib3/util/timeout.py +++ b/env/Lib/site-packages/pip/_vendor/urllib3/util/timeout.py @@ -2,9 +2,8 @@ from __future__ import absolute_import import time -# The default socket timeout, used by httplib to indicate that no timeout was -# specified by the user -from socket import _GLOBAL_DEFAULT_TIMEOUT +# The default socket timeout, used by httplib to indicate that no timeout was specified by the user +from socket import _GLOBAL_DEFAULT_TIMEOUT, getdefaulttimeout from ..exceptions import TimeoutStateError @@ -116,6 +115,10 @@ class Timeout(object): # __str__ provided for backwards compatibility __str__ = __repr__ + @classmethod + def resolve_default_timeout(cls, timeout): + return getdefaulttimeout() if timeout is cls.DEFAULT_TIMEOUT else timeout + @classmethod def _validate_timeout(cls, value, name): """Check that a timeout attribute is valid.
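The verify_mode/check_hostname reordering above exists because SSLContext refuses some combinations outright; a short demonstration of the guard it works around:

import ssl

# With PROTOCOL_TLS_CLIENT the fresh context starts locked down.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
assert ctx.check_hostname and ctx.verify_mode == ssl.CERT_REQUIRED

try:
    ctx.verify_mode = ssl.CERT_NONE   # wrong order: check_hostname still True
except ValueError as exc:
    print(exc)  # "Cannot set verify_mode to CERT_NONE when check_hostname is enabled."

ctx.check_hostname = False            # right order: hostname check off first...
ctx.verify_mode = ssl.CERT_NONE       # ...then verify_mode can be relaxed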
diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/util/url.py b/env/Lib/site-packages/pip/_vendor/urllib3/util/url.py index 66c8795b11e79d29444ed25b7dcddee56903fd35..a960b2f3c5f3d11fc9ae43638da9877d635e8d91 100644 --- a/env/Lib/site-packages/pip/_vendor/urllib3/util/url.py +++ b/env/Lib/site-packages/pip/_vendor/urllib3/util/url.py @@ -50,7 +50,7 @@ _variations = [ "(?:(?:%(hex)s:){0,6}%(hex)s)?::", ] -UNRESERVED_PAT = r"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._!\-~" +UNRESERVED_PAT = r"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._\-~" IPV6_PAT = "(?:" + "|".join([x % _subs for x in _variations]) + ")" ZONE_ID_PAT = "(?:%25|%)(?:[" + UNRESERVED_PAT + "]|%[a-fA-F0-9]{2})+" IPV6_ADDRZ_PAT = r"\[" + IPV6_PAT + r"(?:" + ZONE_ID_PAT + r")?\]" @@ -63,12 +63,12 @@ IPV6_ADDRZ_RE = re.compile("^" + IPV6_ADDRZ_PAT + "$") BRACELESS_IPV6_ADDRZ_RE = re.compile("^" + IPV6_ADDRZ_PAT[2:-2] + "$") ZONE_ID_RE = re.compile("(" + ZONE_ID_PAT + r")\]$") -SUBAUTHORITY_PAT = (u"^(?:(.*)@)?(%s|%s|%s)(?::([0-9]{0,5}))?$") % ( +_HOST_PORT_PAT = ("^(%s|%s|%s)(?::0*?(|0|[1-9][0-9]{0,4}))?$") % ( REG_NAME_PAT, IPV4_PAT, IPV6_ADDRZ_PAT, ) -SUBAUTHORITY_RE = re.compile(SUBAUTHORITY_PAT, re.UNICODE | re.DOTALL) +_HOST_PORT_RE = re.compile(_HOST_PORT_PAT, re.UNICODE | re.DOTALL) UNRESERVED_CHARS = set( "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._-~" @@ -279,6 +279,9 @@ def _normalize_host(host, scheme): if scheme in NORMALIZABLE_SCHEMES: is_ipv6 = IPV6_ADDRZ_RE.match(host) if is_ipv6: + # IPv6 hosts of the form 'a::b%zone' are encoded in a URL as + # such per RFC 6874: 'a::b%25zone'. Unquote the ZoneID + # separator as necessary to return a valid RFC 4007 scoped IP. match = ZONE_ID_RE.search(host) if match: start, end = match.span(1) @@ -300,7 +303,7 @@ def _normalize_host(host, scheme): def _idna_encode(name): - if name and any([ord(x) > 128 for x in name]): + if name and any(ord(x) >= 128 for x in name): try: from pip._vendor import idna except ImportError: @@ -331,7 +334,7 @@ def parse_url(url): """ Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is performed to parse incomplete urls. Fields not provided will be None. - This parser is RFC 3986 compliant. + This parser is RFC 3986 and RFC 6874 compliant. The parser logic and helper functions are based heavily on work done in the ``rfc3986`` module. @@ -365,7 +368,9 @@ def parse_url(url): scheme = scheme.lower() if authority: - auth, host, port = SUBAUTHORITY_RE.match(authority).groups() + auth, _, host_port = authority.rpartition("@") + auth = auth or None + host, port = _HOST_PORT_RE.match(host_port).groups() if auth and normalize_uri: auth = _encode_invalid_chars(auth, USERINFO_CHARS) if port == "": diff --git a/env/Lib/site-packages/pip/_vendor/urllib3/util/wait.py b/env/Lib/site-packages/pip/_vendor/urllib3/util/wait.py index c280646c7be0b1887878254408c6f6c5158651ca..21b4590b3dc9b58902b0d47164b9023e54a85ef8 100644 --- a/env/Lib/site-packages/pip/_vendor/urllib3/util/wait.py +++ b/env/Lib/site-packages/pip/_vendor/urllib3/util/wait.py @@ -42,7 +42,6 @@ if sys.version_info >= (3, 5): def _retry_on_intr(fn, timeout): return fn(timeout) - else: # Old and broken Pythons. 
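The url.py hunk above replaces the single SUBAUTHORITY_RE with an rpartition('@') split plus a host/port regex, which both tolerates '@' inside the userinfo and strips leading zeros from ports. A simplified sketch (the host pattern here is a toy; urllib3's real one is the REG_NAME/IPV4/IPV6 alternation):

import re

_HOST_PORT_RE = re.compile(r"^([^:@\[\]]+)(?::0*?(|0|[1-9][0-9]{0,4}))?$")

def split_authority(authority):
    # rpartition keeps any '@' inside the userinfo intact.
    auth, _, host_port = authority.rpartition("@")
    auth = auth or None
    host, port = _HOST_PORT_RE.match(host_port).groups()
    return auth, host, port

assert split_authority("user:p@ss@example.com:0080") == ("user:p@ss", "example.com", "80")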
def _retry_on_intr(fn, timeout): diff --git a/env/Lib/site-packages/pip/_vendor/vendor.txt b/env/Lib/site-packages/pip/_vendor/vendor.txt index 6c9732e97d8c32dd8f98520a8051f223e6b92514..4ab2915fb80caa0eb282c675c0763060c639448c 100644 --- a/env/Lib/site-packages/pip/_vendor/vendor.txt +++ b/env/Lib/site-packages/pip/_vendor/vendor.txt @@ -1,22 +1,23 @@ -appdirs==1.4.4 -CacheControl==0.12.6 -colorama==0.4.4 -distlib==0.3.1 -distro==1.5.0 -html5lib==1.1 -msgpack==1.0.2 -packaging==20.9 -pep517==0.10.0 -progress==1.5 -pyparsing==2.4.7 -requests==2.25.1 - certifi==2020.12.05 - chardet==4.0.0 - idna==3.1 - urllib3==1.26.4 -resolvelib==0.7.0 -setuptools==44.0.0 -six==1.15.0 -tenacity==7.0.0 -toml==0.10.2 +CacheControl==0.12.11 # Make sure to update the license in pyproject.toml for this. +colorama==0.4.6 +distlib==0.3.6 +distro==1.8.0 +msgpack==1.0.5 +packaging==21.3 +platformdirs==3.8.1 +pyparsing==3.1.0 +pyproject-hooks==1.0.0 +requests==2.31.0 + certifi==2023.5.7 + chardet==5.1.0 + idna==3.4 + urllib3==1.26.16 +rich==13.4.2 + pygments==2.15.1 + typing_extensions==4.7.1 +resolvelib==1.0.1 +setuptools==68.0.0 +six==1.16.0 +tenacity==8.2.2 +tomli==2.0.1 webencodings==0.5.1 diff --git a/env/Lib/site-packages/pip/_vendor/webencodings/__pycache__/__init__.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/webencodings/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index fcdd9887065062532204f48c133c95c606a5caed..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/webencodings/__pycache__/__init__.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/webencodings/__pycache__/labels.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/webencodings/__pycache__/labels.cpython-39.pyc deleted file mode 100644 index 73c17f1c89e2d66c3a7e254eef7bd576669cab77..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/webencodings/__pycache__/labels.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/webencodings/__pycache__/mklabels.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/webencodings/__pycache__/mklabels.cpython-39.pyc deleted file mode 100644 index df8605f830f49418442d79feebbf5d5abfdd9974..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/webencodings/__pycache__/mklabels.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/webencodings/__pycache__/tests.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/webencodings/__pycache__/tests.cpython-39.pyc deleted file mode 100644 index 5ea0109cb3369e363f18fd05d2eb7e5182b97e17..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/webencodings/__pycache__/tests.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/_vendor/webencodings/__pycache__/x_user_defined.cpython-39.pyc b/env/Lib/site-packages/pip/_vendor/webencodings/__pycache__/x_user_defined.cpython-39.pyc deleted file mode 100644 index 674582f58a3b1ead98ea0a0d25e86af1322b89e3..0000000000000000000000000000000000000000 Binary files a/env/Lib/site-packages/pip/_vendor/webencodings/__pycache__/x_user_defined.cpython-39.pyc and /dev/null differ diff --git a/env/Lib/site-packages/pip/py.typed b/env/Lib/site-packages/pip/py.typed index 0b44fd9b5c74636468dd626657fb3efd2c5237c1..493b53e4e7a3984ddd49780313bf3bd9901dc1e0 100644 --- a/env/Lib/site-packages/pip/py.typed +++ b/env/Lib/site-packages/pip/py.typed @@ -1,4 +1,4 @@ pip is a command 
line program. While it is implemented in Python, and so is available for import, you must not use pip's internal APIs in this way. Typing -information is provided as a convenience only and is not a gaurantee. Expect +information is provided as a convenience only and is not a guarantee. Expect unannounced changes to the API and types in releases. diff --git a/env/Scripts/Activate.ps1 b/env/Scripts/Activate.ps1 index e06da14440d84e4ec21d2af4c421ce340d3a3d9b..e4891d8698f93677bac47182edfb68c22a5511df 100644 --- a/env/Scripts/Activate.ps1 +++ b/env/Scripts/Activate.ps1 @@ -96,6 +96,11 @@ function global:deactivate ([switch]$NonDestructive) { Remove-Item -Path env:VIRTUAL_ENV } + # Just remove VIRTUAL_ENV_PROMPT altogether. + if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) { + Remove-Item -Path env:VIRTUAL_ENV_PROMPT + } + # Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether: if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) { Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force @@ -197,7 +202,7 @@ else { $Prompt = $pyvenvCfg['prompt']; } else { - Write-Verbose " Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virutal environment)" + Write-Verbose " Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virtual environment)" Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'" $Prompt = Split-Path -Path $venvDir -Leaf } @@ -228,6 +233,7 @@ if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) { Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) " _OLD_VIRTUAL_PROMPT } + $env:VIRTUAL_ENV_PROMPT = $Prompt } # Clear PYTHONHOME @@ -241,159 +247,256 @@ Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH $Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH" # SIG # Begin signature block -# MIIc+AYJKoZIhvcNAQcCoIIc6TCCHOUCAQExDzANBglghkgBZQMEAgEFADB5Bgor +# MIIvJAYJKoZIhvcNAQcCoIIvFTCCLxECAQExDzANBglghkgBZQMEAgEFADB5Bgor # BgEEAYI3AgEEoGswaTA0BgorBgEEAYI3AgEeMCYCAwEAAAQQH8w7YFlLCE63JNLG -# KX7zUQIBAAIBAAIBAAIBAAIBADAxMA0GCWCGSAFlAwQCAQUABCAwnDYwEHaCQq0n -# 8NAvsN7H7BO7/48rXCNwrg891FS5vaCCC38wggUwMIIEGKADAgECAhAECRgbX9W7 -# ZnVTQ7VvlVAIMA0GCSqGSIb3DQEBCwUAMGUxCzAJBgNVBAYTAlVTMRUwEwYDVQQK -# EwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20xJDAiBgNV -# BAMTG0RpZ2lDZXJ0IEFzc3VyZWQgSUQgUm9vdCBDQTAeFw0xMzEwMjIxMjAwMDBa -# Fw0yODEwMjIxMjAwMDBaMHIxCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2Vy -# dCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20xMTAvBgNVBAMTKERpZ2lD -# ZXJ0IFNIQTIgQXNzdXJlZCBJRCBDb2RlIFNpZ25pbmcgQ0EwggEiMA0GCSqGSIb3 -# DQEBAQUAA4IBDwAwggEKAoIBAQD407Mcfw4Rr2d3B9MLMUkZz9D7RZmxOttE9X/l -# qJ3bMtdx6nadBS63j/qSQ8Cl+YnUNxnXtqrwnIal2CWsDnkoOn7p0WfTxvspJ8fT -# eyOU5JEjlpB3gvmhhCNmElQzUHSxKCa7JGnCwlLyFGeKiUXULaGj6YgsIJWuHEqH -# CN8M9eJNYBi+qsSyrnAxZjNxPqxwoqvOf+l8y5Kh5TsxHM/q8grkV7tKtel05iv+ -# bMt+dDk2DZDv5LVOpKnqagqrhPOsZ061xPeM0SAlI+sIZD5SlsHyDxL0xY4PwaLo -# LFH3c7y9hbFig3NBggfkOItqcyDQD2RzPJ6fpjOp/RnfJZPRAgMBAAGjggHNMIIB -# yTASBgNVHRMBAf8ECDAGAQH/AgEAMA4GA1UdDwEB/wQEAwIBhjATBgNVHSUEDDAK -# BggrBgEFBQcDAzB5BggrBgEFBQcBAQRtMGswJAYIKwYBBQUHMAGGGGh0dHA6Ly9v -# Y3NwLmRpZ2ljZXJ0LmNvbTBDBggrBgEFBQcwAoY3aHR0cDovL2NhY2VydHMuZGln -# aWNlcnQuY29tL0RpZ2lDZXJ0QXNzdXJlZElEUm9vdENBLmNydDCBgQYDVR0fBHow -# eDA6oDigNoY0aHR0cDovL2NybDQuZGlnaWNlcnQuY29tL0RpZ2lDZXJ0QXNzdXJl -# ZElEUm9vdENBLmNybDA6oDigNoY0aHR0cDovL2NybDMuZGlnaWNlcnQuY29tL0Rp -# 
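Activate.ps1 now exports VIRTUAL_ENV_PROMPT alongside VIRTUAL_ENV and removes it again on deactivate. A hedged sketch of how a tool might consume it (the fallback to the directory name is an assumption, not part of the script):

import os

def venv_prompt():
    if "VIRTUAL_ENV" not in os.environ:
        return None  # no active virtual environment
    # Fall back to the directory name when the prompt variable is absent
    # (e.g. environments activated by an older Activate.ps1).
    return os.environ.get("VIRTUAL_ENV_PROMPT") or os.path.basename(os.environ["VIRTUAL_ENV"])

print(venv_prompt())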
bmMuMTswOQYDVQQDEzJEaWdpQ2VydCBUcnVzdGVkIEc0IFJTQTQwOTYgU0hBMjU2 +# IFRpbWVTdGFtcGluZyBDQQIQBUSv85SdCDmmv9s/X+VhFjANBglghkgBZQMEAgEF +# AKCB0TAaBgkqhkiG9w0BCQMxDQYLKoZIhvcNAQkQAQQwHAYJKoZIhvcNAQkFMQ8X +# DTIzMTAwMjEzMjMzMlowKwYLKoZIhvcNAQkQAgwxHDAaMBgwFgQUZvArMsLCyQ+C +# Xc6qisnGTxmcz0AwLwYJKoZIhvcNAQkEMSIEIBa6X0J/pyxEP0qKmxfT51tQm4Im +# u/63uybfmuePN1UHMDcGCyqGSIb3DQEJEAIvMSgwJjAkMCIEINL25G3tdCLM0dRA +# V2hBNm+CitpVmq4zFq9NGprUDHgoMA0GCSqGSIb3DQEBAQUABIICAFRAXwhn3az4 +# r+zhZk2K3gibUwGFM4dHGKh1RD383PsnUii90EaBVzixPIppOs7v0iZOER2tce8u +# xgQ3IZtjRIuQmCh+9I6zWIH59MssGU6igexTAjc0dIfV66UeFlh2aSWWckxmaTI/ +# jXgpqx2sk4dlqJKrR/d084gyevoCkQNerED/FuwJ6bX8omhIikyHdoo6hbL595tK +# 4ekxek027svbPFC0WbAxZXLq428C5endJaAQV2RTCk0B1vgPFw5bvznldlQnRqMu +# vcd1GSoSVN16kw5UKWXEyP0e07yiXEPkFoPBSC/c5o7COjidK2VOlVY/gsorVMEA +# rx/apEwf1F70vD2caiIsUAhrmPzyslFZfU6o2fKXI/vrBTUvbzb5Vz+IGoXlwTH+ +# loWh4loof+pJjbpJJj26GQwssAl3M/m/3BGamnmf97CJShvPrM0w8WOLkXtn6/M4 +# Y15ywECiRhaQX7NUVzRBl1hGDDD0mFv/OHzMer3C4/8I04whBnfvMxelUb4YhlM6 +# e+F6DzvI91OWTrpjsPKOHE3ds9ra+F1yPkV1RMQMRx/xWd0D274R8W2baHxrYgS3 +# 2LYPSAYFMi3cmF6Df84iIG2d0qj0x3ioXDUUk72YTCUO1Xnz9jkHSCU7Bq2GL9uP +# z4sEyNDU23dEmfGD672vMRuZ6Ua/ks5J
# SIG # End signature block
diff --git a/env/Scripts/activate b/env/Scripts/activate
index e642fb2e9ade0ddbc988f9751b042dc01490160f..83d829ceb3e7ecbbb3930bae64de1b991098ee93 100644
--- a/env/Scripts/activate
+++ b/env/Scripts/activate
@@ -1,5 +1,5 @@
 # This file must be used with "source bin/activate" *from bash*
-# you cannot run it directly
+# You cannot run it directly
 
 deactivate () {
     # reset old environment variables
@@ -28,6 +28,7 @@ deactivate () {
     fi
 
     unset VIRTUAL_ENV
+    unset VIRTUAL_ENV_PROMPT
     if [ ! "${1:-}" = "nondestructive" ] ; then
     # Self destruct!
         unset -f deactivate
@@ -37,8 +38,15 @@
 # unset irrelevant variables
 deactivate nondestructive
 
-VIRTUAL_ENV="C:\Users\eldir\OneDrive - UNIVERSITAS INDONESIA\SEMESTER 6\PPL\a4-logistik\env"
-export VIRTUAL_ENV
+# on Windows, a path can contain colons and backslashes and has to be converted:
+if [ "$OSTYPE" = "cygwin" ] || [ "$OSTYPE" = "msys" ] ; then
+    # transform D:\path\to\venv to /d/path/to/venv on MSYS
+    # and to /cygdrive/d/path/to/venv on Cygwin
+    export VIRTUAL_ENV=$(cygpath "D:\PPL\a4-logistik\env")
+else
+    # use the path as-is
+    export VIRTUAL_ENV="D:\PPL\a4-logistik\env"
+fi
 
 _OLD_VIRTUAL_PATH="$PATH"
 PATH="$VIRTUAL_ENV/Scripts:$PATH"
@@ -56,6 +64,8 @@ if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then
     _OLD_VIRTUAL_PS1="${PS1:-}"
     PS1="(env) ${PS1:-}"
     export PS1
+    VIRTUAL_ENV_PROMPT="(env) "
+    export VIRTUAL_ENV_PROMPT
 fi
 
 # This should detect bash and zsh, which have a hash command that must
diff --git a/env/Scripts/activate.bat b/env/Scripts/activate.bat
index 02e10db2c2470d2bfae7393ee2364c7405b9d015..441312f56c713e1e063c2623a843142f07ca72af 100644
--- a/env/Scripts/activate.bat
+++ b/env/Scripts/activate.bat
@@ -8,7 +8,7 @@ if defined _OLD_CODEPAGE (
     "%SystemRoot%\System32\chcp.com" 65001 > nul
 )
 
-set VIRTUAL_ENV=C:\Users\eldir\OneDrive - UNIVERSITAS INDONESIA\SEMESTER 6\PPL\a4-logistik\env
+set VIRTUAL_ENV=D:\PPL\a4-logistik\env
 
 if not defined PROMPT set PROMPT=$P$G
 
@@ -25,6 +25,7 @@ if defined _OLD_VIRTUAL_PATH set PATH=%_OLD_VIRTUAL_PATH%
 if not defined _OLD_VIRTUAL_PATH set _OLD_VIRTUAL_PATH=%PATH%
 
 set PATH=%VIRTUAL_ENV%\Scripts;%PATH%
+set VIRTUAL_ENV_PROMPT=(env)
 
 :END
 if defined _OLD_CODEPAGE (
diff --git a/env/Scripts/deactivate.bat b/env/Scripts/deactivate.bat
index 1205c618686fbb0e28bb9f61a782d7be790d1163..62a39a7584f4d7c5fbc31758e3e9e7eff700276d 100644
--- a/env/Scripts/deactivate.bat
+++ b/env/Scripts/deactivate.bat
@@ -17,5 +17,6 @@ if defined _OLD_VIRTUAL_PATH (
 set _OLD_VIRTUAL_PATH=
 
 set VIRTUAL_ENV=
+set VIRTUAL_ENV_PROMPT=
 
 :END
diff --git a/env/Scripts/pip.exe b/env/Scripts/pip.exe
index 6bf27412711b6f0f31603935323f7e96bd3683ed..8c12433cae8225bc3f158cab721375b9a87b84dc 100644
Binary files a/env/Scripts/pip.exe and b/env/Scripts/pip.exe differ
diff --git a/env/Scripts/pip3.9.exe b/env/Scripts/pip3.9.exe
deleted file mode 100644
index 6bf27412711b6f0f31603935323f7e96bd3683ed..0000000000000000000000000000000000000000
Binary files a/env/Scripts/pip3.9.exe and /dev/null differ
diff --git a/env/Scripts/pip3.exe b/env/Scripts/pip3.exe
index 6bf27412711b6f0f31603935323f7e96bd3683ed..8c12433cae8225bc3f158cab721375b9a87b84dc 100644
Binary files a/env/Scripts/pip3.exe and b/env/Scripts/pip3.exe differ
diff --git a/env/Scripts/python.exe b/env/Scripts/python.exe
index 7885ec4fa9f470f967a2445c36924fecd95b9ca8..71092d0867a15def9214a0470984023b72167726 100644
Binary files a/env/Scripts/python.exe and b/env/Scripts/python.exe differ
diff --git a/env/Scripts/pythonw.exe b/env/Scripts/pythonw.exe
index 5ffbf4dbc0de6fef7706f73f84e6702c5e62a547..4710dd54c335f620716f1c3b2cf5c281cba59992 100644
Binary files a/env/Scripts/pythonw.exe and b/env/Scripts/pythonw.exe differ
diff --git a/env/pyvenv.cfg b/env/pyvenv.cfg
index 5702368248ab9a3cca2f6c6e89c921f256095a65..a82a846ced666c94b32dfa5609ed54fece836e7e 100644
--- a/env/pyvenv.cfg
+++ b/env/pyvenv.cfg
@@ -1,3 +1,5 @@
-home = C:\Users\eldir\AppData\Local\Programs\Python\Python39
+home = C:\Python312
 include-system-site-packages = false
-version = 3.9.6
+version = 3.12.0
+executable = C:\Python312\python.exe
+command = C:\Python312\python.exe -m venv D:\PPL\a4-logistik\env
diff --git a/project_a4_logistik/urls.py b/project_a4_logistik/urls.py
index 703657ec63c803463b7fea4933aed169d86c7e67..69fa3f0ecd3438218363895a6e6afe9efce5aa9b 100644
--- a/project_a4_logistik/urls.py
+++ b/project_a4_logistik/urls.py
@@ -20,5 +20,6 @@ urlpatterns = [
     path('', include('example_app.urls')),
     path('admin/', admin.site.urls),
     path('example_app/', include('example_app.urls')),
-    path('pr/', include('purchase_requisition.urls'))
+    path('pr/', include('purchase_requisition.urls')),
+    path('detail_item/', include('detail_item.urls'))
 ]
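
The activate hunk above replaces the hard-coded VIRTUAL_ENV assignment with a branch that routes the Windows path through cygpath under Cygwin and MSYS, because those shells expect POSIX-style paths. A minimal Python sketch of that conversion, covering only the two transformations named in the script's own comments (to_posix is our name; the real cygpath handles many more cases):

    # Rough sketch of the path rewrite the updated activate script
    # delegates to cygpath on Cygwin/MSYS; illustrative only.
    import re

    def to_posix(win_path: str, flavor: str = "msys") -> str:
        """Convert a D:\\dir\\env-style path to its MSYS/Cygwin form."""
        m = re.match(r"^([A-Za-z]):[\\/](.*)$", win_path)
        if not m:
            return win_path.replace("\\", "/")
        drive, rest = m.group(1).lower(), m.group(2).replace("\\", "/")
        prefix = f"/{drive}" if flavor == "msys" else f"/cygdrive/{drive}"
        return f"{prefix}/{rest}"

    print(to_posix(r"D:\PPL\a4-logistik\env"))            # /d/PPL/a4-logistik/env
    print(to_posix(r"D:\PPL\a4-logistik\env", "cygwin"))  # /cygdrive/d/PPL/a4-logistik/env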
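The same regeneration threads a new VIRTUAL_ENV_PROMPT variable through activate, activate.bat, and deactivate.bat, matching the venv scripts shipped with newer CPython releases. A hedged sketch of how external tooling might consume that pair of variables (venv_prompt is a hypothetical helper, not part of this change):

    # Hypothetical consumer of the variables the regenerated scripts export:
    # VIRTUAL_ENV_PROMPT carries the prompt prefix, VIRTUAL_ENV the path.
    import os
    from pathlib import Path

    def venv_prompt() -> str:
        """Return the active venv's prompt prefix, or '' if none is active."""
        prompt = os.environ.get("VIRTUAL_ENV_PROMPT")
        if prompt:
            return prompt
        venv = os.environ.get("VIRTUAL_ENV")
        # Older activate scripts set only VIRTUAL_ENV; fall back to its name.
        return f"({Path(venv).name}) " if venv else ""

    print(venv_prompt() or "<no venv active>")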
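The pyvenv.cfg hunk records the environment's move from Python 3.9.6 to 3.12.0; at startup the interpreter parses these key = value lines to locate the base installation named by home. A small illustrative parser for that format, assuming every entry follows the simple shape shown in the diff (read_pyvenv_cfg is our name, not a stdlib API):

    # Minimal reader for the "key = value" lines updated in this diff.
    from pathlib import Path

    def read_pyvenv_cfg(env_dir: str) -> dict:
        cfg = {}
        for line in Path(env_dir, "pyvenv.cfg").read_text().splitlines():
            key, sep, value = line.partition("=")
            if sep:  # skip lines without a '=' separator
                cfg[key.strip()] = value.strip()
        return cfg

    # e.g. read_pyvenv_cfg(r"D:\PPL\a4-logistik\env")["home"] == r"C:\Python312"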
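Finally, the urls.py hunk mounts the new app with path('detail_item/', include('detail_item.urls')). include() strips the matched prefix before handing the rest of the URL to the app's own URLConf, so a per-item view needs a converter such as <int:id> in the included pattern. A self-contained sketch of that composition, using invented names (demo_app, a placeholder show_detail) rather than the project's modules:

    # Sketch of how include() composes URLConfs; names here are invented.
    from django.urls import include, path

    def show_detail(request, id):
        ...  # placeholder view body for the sketch

    detail_patterns = ([path('<int:id>/', show_detail, name='show_detail')],
                       'demo_app')

    urlpatterns = [
        # 'detail_item/' is consumed by the outer pattern, so the included
        # list sees only '3/'; GET /detail_item/3/ -> show_detail(request, id=3)
        path('detail_item/', include(detail_patterns)),
    ]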