From d47f8b48935d258f4c5c3e2267911753bebd5214 Mon Sep 17 00:00:00 2001
From: Biswakalyan Bhuyan <biswa@surgot.in>
Date: Mon, 14 Nov 2022 16:43:12 +0530
Subject: id card

---
 .../site-packages/pkg_resources/__init__.py        | 3296 -----------
 .../__pycache__/__init__.cpython-310.pyc           |  Bin 100546 -> 0 bytes
 .../pkg_resources/_vendor/__init__.py              |    0
 .../_vendor/__pycache__/__init__.cpython-310.pyc   |  Bin 178 -> 0 bytes
 .../_vendor/__pycache__/appdirs.cpython-310.pyc    |  Bin 20239 -> 0 bytes
 .../_vendor/__pycache__/zipp.cpython-310.pyc       |  Bin 10221 -> 0 bytes
 .../site-packages/pkg_resources/_vendor/appdirs.py |  608 --
 .../_vendor/importlib_resources/__init__.py        |   36 -
 .../__pycache__/__init__.cpython-310.pyc           |  Bin 633 -> 0 bytes
 .../__pycache__/_adapters.cpython-310.pyc          |  Bin 7341 -> 0 bytes
 .../__pycache__/_common.cpython-310.pyc            |  Bin 2631 -> 0 bytes
 .../__pycache__/_compat.cpython-310.pyc            |  Bin 3491 -> 0 bytes
 .../__pycache__/_itertools.cpython-310.pyc         |  Bin 878 -> 0 bytes
 .../__pycache__/_legacy.cpython-310.pyc            |  Bin 4214 -> 0 bytes
 .../__pycache__/abc.cpython-310.pyc                |  Bin 5367 -> 0 bytes
 .../__pycache__/readers.cpython-310.pyc            |  Bin 5451 -> 0 bytes
 .../__pycache__/simple.cpython-310.pyc             |  Bin 4715 -> 0 bytes
 .../_vendor/importlib_resources/_adapters.py       |  170 -
 .../_vendor/importlib_resources/_common.py         |  104 -
 .../_vendor/importlib_resources/_compat.py         |   98 -
 .../_vendor/importlib_resources/_itertools.py      |   35 -
 .../_vendor/importlib_resources/_legacy.py         |  121 -
 .../_vendor/importlib_resources/abc.py             |  137 -
 .../_vendor/importlib_resources/readers.py         |  122 -
 .../_vendor/importlib_resources/simple.py          |  116 -
 .../pkg_resources/_vendor/jaraco/__init__.py       |    0
 .../jaraco/__pycache__/__init__.cpython-310.pyc    |  Bin 185 -> 0 bytes
 .../jaraco/__pycache__/context.cpython-310.pyc     |  Bin 6317 -> 0 bytes
 .../jaraco/__pycache__/functools.cpython-310.pyc   |  Bin 15609 -> 0 bytes
 .../pkg_resources/_vendor/jaraco/context.py        |  213 -
 .../pkg_resources/_vendor/jaraco/functools.py      |  525 --
 .../pkg_resources/_vendor/jaraco/text/__init__.py  |  599 --
 .../text/__pycache__/__init__.cpython-310.pyc      |  Bin 19654 -> 0 bytes
 .../_vendor/more_itertools/__init__.py             |    4 -
 .../__pycache__/__init__.cpython-310.pyc           |  Bin 260 -> 0 bytes
 .../__pycache__/more.cpython-310.pyc               |  Bin 123033 -> 0 bytes
 .../__pycache__/recipes.cpython-310.pyc            |  Bin 20321 -> 0 bytes
 .../pkg_resources/_vendor/more_itertools/more.py   | 4316 ---------------
 .../_vendor/more_itertools/recipes.py              |  698 ---
 .../pkg_resources/_vendor/packaging/__about__.py   |   26 -
 .../pkg_resources/_vendor/packaging/__init__.py    |   25 -
 .../__pycache__/__about__.cpython-310.pyc          |  Bin 585 -> 0 bytes
 .../packaging/__pycache__/__init__.cpython-310.pyc |  Bin 441 -> 0 bytes
 .../__pycache__/_manylinux.cpython-310.pyc         |  Bin 7295 -> 0 bytes
 .../__pycache__/_musllinux.cpython-310.pyc         |  Bin 4607 -> 0 bytes
 .../__pycache__/_structures.cpython-310.pyc        |  Bin 2700 -> 0 bytes
 .../packaging/__pycache__/markers.cpython-310.pyc  |  Bin 9291 -> 0 bytes
 .../__pycache__/requirements.cpython-310.pyc       |  Bin 3979 -> 0 bytes
 .../__pycache__/specifiers.cpython-310.pyc         |  Bin 21522 -> 0 bytes
 .../packaging/__pycache__/tags.cpython-310.pyc     |  Bin 12185 -> 0 bytes
 .../packaging/__pycache__/utils.cpython-310.pyc    |  Bin 3570 -> 0 bytes
 .../packaging/__pycache__/version.cpython-310.pyc  |  Bin 12920 -> 0 bytes
 .../pkg_resources/_vendor/packaging/_manylinux.py  |  301 -
 .../pkg_resources/_vendor/packaging/_musllinux.py  |  136 -
 .../pkg_resources/_vendor/packaging/_structures.py |   61 -
 .../pkg_resources/_vendor/packaging/markers.py     |  304 -
 .../_vendor/packaging/requirements.py              |  146 -
 .../pkg_resources/_vendor/packaging/specifiers.py  |  802 ---
 .../pkg_resources/_vendor/packaging/tags.py        |  487 --
 .../pkg_resources/_vendor/packaging/utils.py       |  136 -
 .../pkg_resources/_vendor/packaging/version.py     |  504 --
 .../pkg_resources/_vendor/pyparsing/__init__.py    |  331 --
 .../pyparsing/__pycache__/__init__.cpython-310.pyc |  Bin 7112 -> 0 bytes
 .../pyparsing/__pycache__/actions.cpython-310.pyc  |  Bin 7177 -> 0 bytes
 .../pyparsing/__pycache__/common.cpython-310.pyc   |  Bin 10100 -> 0 bytes
 .../pyparsing/__pycache__/core.cpython-310.pyc     |  Bin 176325 -> 0 bytes
 .../__pycache__/exceptions.cpython-310.pyc         |  Bin 9067 -> 0 bytes
 .../pyparsing/__pycache__/helpers.cpython-310.pyc  |  Bin 35244 -> 0 bytes
 .../pyparsing/__pycache__/results.cpython-310.pyc  |  Bin 24788 -> 0 bytes
 .../pyparsing/__pycache__/testing.cpython-310.pyc  |  Bin 12095 -> 0 bytes
 .../pyparsing/__pycache__/unicode.cpython-310.pyc  |  Bin 9809 -> 0 bytes
 .../pyparsing/__pycache__/util.cpython-310.pyc     |  Bin 8600 -> 0 bytes
 .../pkg_resources/_vendor/pyparsing/actions.py     |  207 -
 .../pkg_resources/_vendor/pyparsing/common.py      |  424 --
 .../pkg_resources/_vendor/pyparsing/core.py        | 5812 --------------------
 .../_vendor/pyparsing/diagram/__init__.py          |  611 --
 .../diagram/__pycache__/__init__.cpython-310.pyc   |  Bin 16046 -> 0 bytes
 .../pkg_resources/_vendor/pyparsing/exceptions.py  |  267 -
 .../pkg_resources/_vendor/pyparsing/helpers.py     | 1083 ----
 .../pkg_resources/_vendor/pyparsing/results.py     |  760 ---
 .../pkg_resources/_vendor/pyparsing/testing.py     |  331 --
 .../pkg_resources/_vendor/pyparsing/unicode.py     |  332 --
 .../pkg_resources/_vendor/pyparsing/util.py        |  235 -
 .../site-packages/pkg_resources/_vendor/zipp.py    |  329 --
 .../site-packages/pkg_resources/extern/__init__.py |   76 -
 .../extern/__pycache__/__init__.cpython-310.pyc    |  Bin 2931 -> 0 bytes
 86 files changed, 24924 deletions(-)
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/__init__.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/__pycache__/__init__.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/__init__.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/__pycache__/__init__.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/__pycache__/appdirs.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/__pycache__/zipp.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/appdirs.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__init__.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/__init__.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/_adapters.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/_common.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/_compat.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/_itertools.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/_legacy.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/abc.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/readers.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/simple.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_adapters.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_common.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_compat.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_itertools.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_legacy.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/abc.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/readers.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/simple.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/__init__.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/__pycache__/__init__.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/__pycache__/context.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/__pycache__/functools.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/context.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/functools.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/text/__init__.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/text/__pycache__/__init__.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/more_itertools/__init__.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/more_itertools/__pycache__/__init__.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/more_itertools/__pycache__/more.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/more_itertools/__pycache__/recipes.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/more_itertools/more.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/more_itertools/recipes.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__about__.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__init__.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/__about__.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/__init__.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/_manylinux.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/_musllinux.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/_structures.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/markers.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/requirements.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/specifiers.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/tags.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/utils.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/version.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_manylinux.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_musllinux.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_structures.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/markers.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/requirements.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/specifiers.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/tags.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/utils.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/version.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__init__.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/__init__.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/actions.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/common.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/core.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/exceptions.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/helpers.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/results.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/testing.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/unicode.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/util.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/actions.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/common.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/core.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/diagram/__init__.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/diagram/__pycache__/__init__.cpython-310.pyc
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/exceptions.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/helpers.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/results.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/testing.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/unicode.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/util.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/_vendor/zipp.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/extern/__init__.py
 delete mode 100644 env/lib/python3.10/site-packages/pkg_resources/extern/__pycache__/__init__.cpython-310.pyc

diff --git a/env/lib/python3.10/site-packages/pkg_resources/__init__.py b/env/lib/python3.10/site-packages/pkg_resources/__init__.py
deleted file mode 100644
index d59226a..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/__init__.py
+++ /dev/null
@@ -1,3296 +0,0 @@
-"""
-Package resource API
---------------------
-
-A resource is a logical file contained within a package, or a logical
-subdirectory thereof.  The package resource API expects resource names
-to have their path parts separated with ``/``, *not* whatever the local
-path separator is.  Do not use os.path operations to manipulate resource
-names being passed into the API.
-
-The package resource API is designed to work with normal filesystem packages,
-.egg files, and unpacked .egg files.  It can also work in a limited way with
-.zip files and with custom PEP 302 loaders that support the ``get_data()``
-method.
-"""
-
-import sys
-import os
-import io
-import time
-import re
-import types
-import zipfile
-import zipimport
-import warnings
-import stat
-import functools
-import pkgutil
-import operator
-import platform
-import collections
-import plistlib
-import email.parser
-import errno
-import tempfile
-import textwrap
-import itertools
-import inspect
-import ntpath
-import posixpath
-import importlib
-from pkgutil import get_importer
-
-try:
-    import _imp
-except ImportError:
-    # Python 3.2 compatibility
-    import imp as _imp
-
-try:
-    FileExistsError
-except NameError:
-    FileExistsError = OSError
-
-# capture these to bypass sandboxing
-from os import utime
-try:
-    from os import mkdir, rename, unlink
-    WRITE_SUPPORT = True
-except ImportError:
-    # no write support, probably under GAE
-    WRITE_SUPPORT = False
-
-from os import open as os_open
-from os.path import isdir, split
-
-try:
-    import importlib.machinery as importlib_machinery
-    # access attribute to force import under delayed import mechanisms.
-    importlib_machinery.__name__
-except ImportError:
-    importlib_machinery = None
-
-from pkg_resources.extern.jaraco.text import (
-    yield_lines,
-    drop_comment,
-    join_continuation,
-)
-
-from pkg_resources.extern import appdirs
-from pkg_resources.extern import packaging
-__import__('pkg_resources.extern.packaging.version')
-__import__('pkg_resources.extern.packaging.specifiers')
-__import__('pkg_resources.extern.packaging.requirements')
-__import__('pkg_resources.extern.packaging.markers')
-__import__('pkg_resources.extern.packaging.utils')
-
-if sys.version_info < (3, 5):
-    raise RuntimeError("Python 3.5 or later is required")
-
-# declare some globals that will be defined later to
-# satisfy the linters.
-require = None
-working_set = None
-add_activation_listener = None
-resources_stream = None
-cleanup_resources = None
-resource_dir = None
-resource_stream = None
-set_extraction_path = None
-resource_isdir = None
-resource_string = None
-iter_entry_points = None
-resource_listdir = None
-resource_filename = None
-resource_exists = None
-_distribution_finders = None
-_namespace_handlers = None
-_namespace_packages = None
-
-
-class PEP440Warning(RuntimeWarning):
-    """
-    Used when there is an issue with a version or specifier not complying with
-    PEP 440.
-    """
-
-
-def parse_version(v):
-    try:
-        return packaging.version.Version(v)
-    except packaging.version.InvalidVersion:
-        warnings.warn(
-            f"{v} is an invalid version and will not be supported in "
-            "a future release",
-            PkgResourcesDeprecationWarning,
-        )
-        return packaging.version.LegacyVersion(v)
-
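A short illustration of the fallback behaviour implemented above; the version
strings are arbitrary examples:

    from pkg_resources import parse_version

    # PEP 440 versions compare semantically, not lexicographically.
    assert parse_version('1.10') > parse_version('1.9')
    # A non-PEP 440 string warns (PkgResourcesDeprecationWarning) and
    # falls back to LegacyVersion, per the function above.
    legacy = parse_version('not a version')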
-
-_state_vars = {}
-
-
-def _declare_state(vartype, **kw):
-    globals().update(kw)
-    _state_vars.update(dict.fromkeys(kw, vartype))
-
-
-def __getstate__():
-    state = {}
-    g = globals()
-    for k, v in _state_vars.items():
-        state[k] = g['_sget_' + v](g[k])
-    return state
-
-
-def __setstate__(state):
-    g = globals()
-    for k, v in state.items():
-        g['_sset_' + _state_vars[k]](k, g[k], v)
-    return state
-
-
-def _sget_dict(val):
-    return val.copy()
-
-
-def _sset_dict(key, ob, state):
-    ob.clear()
-    ob.update(state)
-
-
-def _sget_object(val):
-    return val.__getstate__()
-
-
-def _sset_object(key, ob, state):
-    ob.__setstate__(state)
-
-
-_sget_none = _sset_none = lambda *args: None
-
-
-def get_supported_platform():
-    """Return this platform's maximum compatible version.
-
-    distutils.util.get_platform() normally reports the minimum version
-    of macOS that would be required to *use* extensions produced by
-    distutils.  But what we want when checking compatibility is to know the
-    version of macOS that we are *running*.  To allow usage of packages that
-    explicitly require a newer version of macOS, we must also know the
-    current version of the OS.
-
-    If this condition occurs for any other platform with a version in its
-    platform strings, this function should be extended accordingly.
-    """
-    plat = get_build_platform()
-    m = macosVersionString.match(plat)
-    if m is not None and sys.platform == "darwin":
-        try:
-            plat = 'macosx-%s-%s' % ('.'.join(_macos_vers()[:2]), m.group(3))
-        except ValueError:
-            # not macOS
-            pass
-    return plat
-
-
-__all__ = [
-    # Basic resource access and distribution/entry point discovery
-    'require', 'run_script', 'get_provider', 'get_distribution',
-    'load_entry_point', 'get_entry_map', 'get_entry_info',
-    'iter_entry_points',
-    'resource_string', 'resource_stream', 'resource_filename',
-    'resource_listdir', 'resource_exists', 'resource_isdir',
-
-    # Environmental control
-    'declare_namespace', 'working_set', 'add_activation_listener',
-    'find_distributions', 'set_extraction_path', 'cleanup_resources',
-    'get_default_cache',
-
-    # Primary implementation classes
-    'Environment', 'WorkingSet', 'ResourceManager',
-    'Distribution', 'Requirement', 'EntryPoint',
-
-    # Exceptions
-    'ResolutionError', 'VersionConflict', 'DistributionNotFound',
-    'UnknownExtra', 'ExtractionError',
-
-    # Warnings
-    'PEP440Warning',
-
-    # Parsing functions and string utilities
-    'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
-    'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
-    'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
-
-    # filesystem utilities
-    'ensure_directory', 'normalize_path',
-
-    # Distribution "precedence" constants
-    'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
-
-    # "Provider" interfaces, implementations, and registration/lookup APIs
-    'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
-    'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
-    'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
-    'register_finder', 'register_namespace_handler', 'register_loader_type',
-    'fixup_namespace_packages', 'get_importer',
-
-    # Warnings
-    'PkgResourcesDeprecationWarning',
-
-    # Deprecated/backward compatibility only
-    'run_main', 'AvailableDistributions',
-]
-
-
-class ResolutionError(Exception):
-    """Abstract base for dependency resolution errors"""
-
-    def __repr__(self):
-        return self.__class__.__name__ + repr(self.args)
-
-
-class VersionConflict(ResolutionError):
-    """
-    An already-installed version conflicts with the requested version.
-
-    Should be initialized with the installed Distribution and the requested
-    Requirement.
-    """
-
-    _template = "{self.dist} is installed but {self.req} is required"
-
-    @property
-    def dist(self):
-        return self.args[0]
-
-    @property
-    def req(self):
-        return self.args[1]
-
-    def report(self):
-        return self._template.format(**locals())
-
-    def with_context(self, required_by):
-        """
-        If required_by is non-empty, return a version of self that is a
-        ContextualVersionConflict.
-        """
-        if not required_by:
-            return self
-        args = self.args + (required_by,)
-        return ContextualVersionConflict(*args)
-
-
-class ContextualVersionConflict(VersionConflict):
-    """
-    A VersionConflict that accepts a third parameter, the set of the
-    requirements that required the installed Distribution.
-    """
-
-    _template = VersionConflict._template + ' by {self.required_by}'
-
-    @property
-    def required_by(self):
-        return self.args[2]
-
-
-class DistributionNotFound(ResolutionError):
-    """A requested distribution was not found"""
-
-    _template = ("The '{self.req}' distribution was not found "
-                 "and is required by {self.requirers_str}")
-
-    @property
-    def req(self):
-        return self.args[0]
-
-    @property
-    def requirers(self):
-        return self.args[1]
-
-    @property
-    def requirers_str(self):
-        if not self.requirers:
-            return 'the application'
-        return ', '.join(self.requirers)
-
-    def report(self):
-        return self._template.format(**locals())
-
-    def __str__(self):
-        return self.report()
-
-
-class UnknownExtra(ResolutionError):
-    """Distribution doesn't have an "extra feature" of the given name"""
-
-
-_provider_factories = {}
-
-PY_MAJOR = '{}.{}'.format(*sys.version_info)
-EGG_DIST = 3
-BINARY_DIST = 2
-SOURCE_DIST = 1
-CHECKOUT_DIST = 0
-DEVELOP_DIST = -1
-
-
-def register_loader_type(loader_type, provider_factory):
-    """Register `provider_factory` to make providers for `loader_type`
-
-    `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
-    and `provider_factory` is a function that, passed a *module* object,
-    returns an ``IResourceProvider`` for that module.
-    """
-    _provider_factories[loader_type] = provider_factory
-
-
-def get_provider(moduleOrReq):
-    """Return an IResourceProvider for the named module or requirement"""
-    if isinstance(moduleOrReq, Requirement):
-        return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
-    try:
-        module = sys.modules[moduleOrReq]
-    except KeyError:
-        __import__(moduleOrReq)
-        module = sys.modules[moduleOrReq]
-    loader = getattr(module, '__loader__', None)
-    return _find_adapter(_provider_factories, loader)(module)
-
-
-def _macos_vers(_cache=[]):
-    if not _cache:
-        version = platform.mac_ver()[0]
-        # fallback for MacPorts
-        if version == '':
-            plist = '/System/Library/CoreServices/SystemVersion.plist'
-            if os.path.exists(plist):
-                if hasattr(plistlib, 'readPlist'):
-                    plist_content = plistlib.readPlist(plist)
-                    if 'ProductVersion' in plist_content:
-                        version = plist_content['ProductVersion']
-
-        _cache.append(version.split('.'))
-    return _cache[0]
-
-
-def _macos_arch(machine):
-    return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
-
-
-def get_build_platform():
-    """Return this platform's string for platform-specific distributions
-
-    XXX Currently this is the same as ``distutils.util.get_platform()``, but it
-    needs some hacks for Linux and macOS.
-    """
-    from sysconfig import get_platform
-
-    plat = get_platform()
-    if sys.platform == "darwin" and not plat.startswith('macosx-'):
-        try:
-            version = _macos_vers()
-            machine = os.uname()[4].replace(" ", "_")
-            return "macosx-%d.%d-%s" % (
-                int(version[0]), int(version[1]),
-                _macos_arch(machine),
-            )
-        except ValueError:
-            # if someone is running a non-Mac darwin system, this will fall
-            # through to the default implementation
-            pass
-    return plat
-
-
-macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
-darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
-# XXX backward compat
-get_platform = get_build_platform
-
-
-def compatible_platforms(provided, required):
-    """Can code for the `provided` platform run on the `required` platform?
-
-    Returns true if either platform is ``None``, or the platforms are equal.
-
-    XXX Needs compatibility checks for Linux and other unixy OSes.
-    """
-    if provided is None or required is None or provided == required:
-        # easy case
-        return True
-
-    # macOS special cases
-    reqMac = macosVersionString.match(required)
-    if reqMac:
-        provMac = macosVersionString.match(provided)
-
-        # is this a Mac package?
-        if not provMac:
-            # this is backwards compatibility for packages built before
-            # setuptools 0.6. All packages built after this point will
-            # use the new macOS designation.
-            provDarwin = darwinVersionString.match(provided)
-            if provDarwin:
-                dversion = int(provDarwin.group(1))
-                macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
-                if dversion == 7 and macosversion >= "10.3" or \
-                        dversion == 8 and macosversion >= "10.4":
-                    return True
-            # egg isn't macOS or legacy darwin
-            return False
-
-        # are they the same major version and machine type?
-        if provMac.group(1) != reqMac.group(1) or \
-                provMac.group(3) != reqMac.group(3):
-            return False
-
-        # is the required OS major update >= the provided one?
-        if int(provMac.group(2)) > int(reqMac.group(2)):
-            return False
-
-        return True
-
-    # XXX Linux and other platforms' special cases should go here
-    return False
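
A quick sketch of the macOS rules implemented above; the platform strings are
illustrative:

    from pkg_resources import compatible_platforms

    # Same macOS major version and machine type; an older provided minor
    # release is compatible with a newer required one.
    assert compatible_platforms('macosx-10.9-x86_64', 'macosx-10.12-x86_64')
    # Different major versions (or machine types) are rejected.
    assert not compatible_platforms('macosx-10.9-x86_64', 'macosx-11.0-x86_64')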
-
-
-def run_script(dist_spec, script_name):
-    """Locate distribution `dist_spec` and run its `script_name` script"""
-    ns = sys._getframe(1).f_globals
-    name = ns['__name__']
-    ns.clear()
-    ns['__name__'] = name
-    require(dist_spec)[0].run_script(script_name, ns)
-
-
-# backward compatibility
-run_main = run_script
-
-
-def get_distribution(dist):
-    """Return a current distribution object for a Requirement or string"""
-    if isinstance(dist, str):
-        dist = Requirement.parse(dist)
-    if isinstance(dist, Requirement):
-        dist = get_provider(dist)
-    if not isinstance(dist, Distribution):
-        raise TypeError("Expected string, Requirement, or Distribution", dist)
-    return dist
-
-
-def load_entry_point(dist, group, name):
-    """Return `name` entry point of `group` for `dist` or raise ImportError"""
-    return get_distribution(dist).load_entry_point(group, name)
-
-
-def get_entry_map(dist, group=None):
-    """Return the entry point map for `group`, or the full entry map"""
-    return get_distribution(dist).get_entry_map(group)
-
-
-def get_entry_info(dist, group, name):
-    """Return the EntryPoint object for `group`+`name`, or ``None``"""
-    return get_distribution(dist).get_entry_info(group, name)
-
-
-class IMetadataProvider:
-    def has_metadata(name):
-        """Does the package's distribution contain the named metadata?"""
-
-    def get_metadata(name):
-        """The named metadata resource as a string"""
-
-    def get_metadata_lines(name):
-        """Yield named metadata resource as list of non-blank non-comment lines
-
-       Leading and trailing whitespace is stripped from each line, and lines
-       with ``#`` as the first non-blank character are omitted."""
-
-    def metadata_isdir(name):
-        """Is the named metadata a directory?  (like ``os.path.isdir()``)"""
-
-    def metadata_listdir(name):
-        """List of metadata names in the directory (like ``os.listdir()``)"""
-
-    def run_script(script_name, namespace):
-        """Execute the named script in the supplied namespace dictionary"""
-
-
-class IResourceProvider(IMetadataProvider):
-    """An object that provides access to package resources"""
-
-    def get_resource_filename(manager, resource_name):
-        """Return a true filesystem path for `resource_name`
-
-        `manager` must be an ``IResourceManager``"""
-
-    def get_resource_stream(manager, resource_name):
-        """Return a readable file-like object for `resource_name`
-
-        `manager` must be an ``IResourceManager``"""
-
-    def get_resource_string(manager, resource_name):
-        """Return a string containing the contents of `resource_name`
-
-        `manager` must be an ``IResourceManager``"""
-
-    def has_resource(resource_name):
-        """Does the package contain the named resource?"""
-
-    def resource_isdir(resource_name):
-        """Is the named resource a directory?  (like ``os.path.isdir()``)"""
-
-    def resource_listdir(resource_name):
-        """List of resource names in the directory (like ``os.listdir()``)"""
-
-
-class WorkingSet:
-    """A collection of active distributions on sys.path (or a similar list)"""
-
-    def __init__(self, entries=None):
-        """Create working set from list of path entries (default=sys.path)"""
-        self.entries = []
-        self.entry_keys = {}
-        self.by_key = {}
-        self.normalized_to_canonical_keys = {}
-        self.callbacks = []
-
-        if entries is None:
-            entries = sys.path
-
-        for entry in entries:
-            self.add_entry(entry)
-
-    @classmethod
-    def _build_master(cls):
-        """
-        Prepare the master working set.
-        """
-        ws = cls()
-        try:
-            from __main__ import __requires__
-        except ImportError:
-            # The main program does not list any requirements
-            return ws
-
-        # ensure the requirements are met
-        try:
-            ws.require(__requires__)
-        except VersionConflict:
-            return cls._build_from_requirements(__requires__)
-
-        return ws
-
-    @classmethod
-    def _build_from_requirements(cls, req_spec):
-        """
-        Build a working set from a requirement spec. Rewrites sys.path.
-        """
-        # try it without defaults already on sys.path
-        # by starting with an empty path
-        ws = cls([])
-        reqs = parse_requirements(req_spec)
-        dists = ws.resolve(reqs, Environment())
-        for dist in dists:
-            ws.add(dist)
-
-        # add any missing entries from sys.path
-        for entry in sys.path:
-            if entry not in ws.entries:
-                ws.add_entry(entry)
-
-        # then copy back to sys.path
-        sys.path[:] = ws.entries
-        return ws
-
-    def add_entry(self, entry):
-        """Add a path item to ``.entries``, finding any distributions on it
-
-        ``find_distributions(entry, True)`` is used to find distributions
-        corresponding to the path entry, and they are added.  `entry` is
-        always appended to ``.entries``, even if it is already present.
-        (This is because ``sys.path`` can contain the same value more than
-        once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
-        equal ``sys.path``.)
-        """
-        self.entry_keys.setdefault(entry, [])
-        self.entries.append(entry)
-        for dist in find_distributions(entry, True):
-            self.add(dist, entry, False)
-
-    def __contains__(self, dist):
-        """True if `dist` is the active distribution for its project"""
-        return self.by_key.get(dist.key) == dist
-
-    def find(self, req):
-        """Find a distribution matching requirement `req`
-
-        If there is an active distribution for the requested project, this
-        returns it as long as it meets the version requirement specified by
-        `req`.  But, if there is an active distribution for the project and it
-        does *not* meet the `req` requirement, ``VersionConflict`` is raised.
-        If there is no active distribution for the requested project, ``None``
-        is returned.
-        """
-        dist = self.by_key.get(req.key)
-
-        if dist is None:
-            canonical_key = self.normalized_to_canonical_keys.get(req.key)
-
-            if canonical_key is not None:
-                req.key = canonical_key
-                dist = self.by_key.get(canonical_key)
-
-        if dist is not None and dist not in req:
-            # XXX add more info
-            raise VersionConflict(dist, req)
-        return dist
-
-    def iter_entry_points(self, group, name=None):
-        """Yield entry point objects from `group` matching `name`
-
-        If `name` is None, yields all entry points in `group` from all
-        distributions in the working set, otherwise only ones matching
-        both `group` and `name` are yielded (in distribution order).
-        """
-        return (
-            entry
-            for dist in self
-            for entry in dist.get_entry_map(group).values()
-            if name is None or name == entry.name
-        )
-
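For reference, a typical use of this generator via the module-level alias;
'console_scripts' is the standard script entry point group:

    import pkg_resources

    # List every 'console_scripts' entry point visible to the global
    # working set; the optional name filter is omitted here.
    for ep in pkg_resources.iter_entry_points('console_scripts'):
        print(ep.name, '->', ep.dist)
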
-    def run_script(self, requires, script_name):
-        """Locate distribution for `requires` and run `script_name` script"""
-        ns = sys._getframe(1).f_globals
-        name = ns['__name__']
-        ns.clear()
-        ns['__name__'] = name
-        self.require(requires)[0].run_script(script_name, ns)
-
-    def __iter__(self):
-        """Yield distributions for non-duplicate projects in the working set
-
-        The yield order is the order in which the items' path entries were
-        added to the working set.
-        """
-        seen = {}
-        for item in self.entries:
-            if item not in self.entry_keys:
-                # workaround a cache issue
-                continue
-
-            for key in self.entry_keys[item]:
-                if key not in seen:
-                    seen[key] = 1
-                    yield self.by_key[key]
-
-    def add(self, dist, entry=None, insert=True, replace=False):
-        """Add `dist` to working set, associated with `entry`
-
-        If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
-        On exit from this routine, `entry` is added to the end of the working
-        set's ``.entries`` (if it wasn't already present).
-
-        `dist` is only added to the working set if it's for a project that
-        doesn't already have a distribution in the set, unless `replace=True`.
-        If it's added, any callbacks registered with the ``subscribe()`` method
-        will be called.
-        """
-        if insert:
-            dist.insert_on(self.entries, entry, replace=replace)
-
-        if entry is None:
-            entry = dist.location
-        keys = self.entry_keys.setdefault(entry, [])
-        keys2 = self.entry_keys.setdefault(dist.location, [])
-        if not replace and dist.key in self.by_key:
-            # ignore hidden distros
-            return
-
-        self.by_key[dist.key] = dist
-        normalized_name = packaging.utils.canonicalize_name(dist.key)
-        self.normalized_to_canonical_keys[normalized_name] = dist.key
-        if dist.key not in keys:
-            keys.append(dist.key)
-        if dist.key not in keys2:
-            keys2.append(dist.key)
-        self._added_new(dist)
-
-    # FIXME: 'WorkingSet.resolve' is too complex (11)
-    def resolve(self, requirements, env=None, installer=None,  # noqa: C901
-                replace_conflicting=False, extras=None):
-        """List all distributions needed to (recursively) meet `requirements`
-
-        `requirements` must be a sequence of ``Requirement`` objects.  `env`,
-        if supplied, should be an ``Environment`` instance.  If
-        not supplied, it defaults to all distributions available within any
-        entry or distribution in the working set.  `installer`, if supplied,
-        will be invoked with each requirement that cannot be met by an
-        already-installed distribution; it should return a ``Distribution`` or
-        ``None``.
-
-        Unless `replace_conflicting=True`, raises a VersionConflict exception
-        if any requirements are found on the path that have the correct name
-        but the wrong version.  Otherwise, if an `installer` is supplied it
-        will be invoked to obtain the correct version of the requirement and
-        activate it.
-
-        `extras` is a list of the extras to be used with these requirements.
-        This is important because extra requirements may look like `my_req;
-        extra = "my_extra"`, which would otherwise be interpreted as a purely
-        optional requirement.  Instead, we want to be able to assert that these
-        requirements are truly required.
-        """
-
-        # set up the stack
-        requirements = list(requirements)[::-1]
-        # set of processed requirements
-        processed = {}
-        # key -> dist
-        best = {}
-        to_activate = []
-
-        req_extras = _ReqExtras()
-
-        # Mapping of requirement to set of distributions that required it;
-        # useful for reporting info about conflicts.
-        required_by = collections.defaultdict(set)
-
-        while requirements:
-            # process dependencies breadth-first
-            req = requirements.pop(0)
-            if req in processed:
-                # Ignore cyclic or redundant dependencies
-                continue
-
-            if not req_extras.markers_pass(req, extras):
-                continue
-
-            dist = best.get(req.key)
-            if dist is None:
-                # Find the best distribution and add it to the map
-                dist = self.by_key.get(req.key)
-                if dist is None or (dist not in req and replace_conflicting):
-                    ws = self
-                    if env is None:
-                        if dist is None:
-                            env = Environment(self.entries)
-                        else:
-                            # Use an empty environment and workingset to avoid
-                            # any further conflicts with the conflicting
-                            # distribution
-                            env = Environment([])
-                            ws = WorkingSet([])
-                    dist = best[req.key] = env.best_match(
-                        req, ws, installer,
-                        replace_conflicting=replace_conflicting
-                    )
-                    if dist is None:
-                        requirers = required_by.get(req, None)
-                        raise DistributionNotFound(req, requirers)
-                to_activate.append(dist)
-            if dist not in req:
-                # Oops, the "best" so far conflicts with a dependency
-                dependent_req = required_by[req]
-                raise VersionConflict(dist, req).with_context(dependent_req)
-
-            # push the new requirements onto the stack
-            new_requirements = dist.requires(req.extras)[::-1]
-            requirements.extend(new_requirements)
-
-            # Register the new requirements needed by req
-            for new_requirement in new_requirements:
-                required_by[new_requirement].add(req.project_name)
-                req_extras[new_requirement] = req.extras
-
-            processed[req] = True
-
-        # return list of distros to activate
-        return to_activate
-
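A hedged sketch of driving ``resolve()`` directly on the global working set;
'setuptools' is used as the requirement only because it is always installed
alongside pkg_resources:

    import pkg_resources
    from pkg_resources import parse_requirements

    ws = pkg_resources.working_set
    # Returns the distributions that would need activation, raising
    # DistributionNotFound or VersionConflict on failure, as documented above.
    needed = ws.resolve(parse_requirements('setuptools'))
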
-    def find_plugins(
-            self, plugin_env, full_env=None, installer=None, fallback=True):
-        """Find all activatable distributions in `plugin_env`
-
-        Example usage::
-
-            distributions, errors = working_set.find_plugins(
-                Environment(plugin_dirlist)
-            )
-            # add plugins+libs to sys.path
-            map(working_set.add, distributions)
-            # display errors
-            print('Could not load', errors)
-
-        The `plugin_env` should be an ``Environment`` instance that contains
-        only distributions that are in the project's "plugin directory" or
-        directories. The `full_env`, if supplied, should be an ``Environment``
-        that contains all currently-available distributions.  If `full_env` is
-        not supplied, one is created automatically from the ``WorkingSet`` this
-        method is called on, which will typically mean that every directory on
-        ``sys.path`` will be scanned for distributions.
-
-        `installer` is a standard installer callback as used by the
-        ``resolve()`` method. The `fallback` flag indicates whether we should
-        attempt to resolve older versions of a plugin if the newest version
-        cannot be resolved.
-
-        This method returns a 2-tuple: (`distributions`, `error_info`), where
-        `distributions` is a list of the distributions found in `plugin_env`
-        that were loadable, along with any other distributions that are needed
-        to resolve their dependencies.  `error_info` is a dictionary mapping
-        unloadable plugin distributions to an exception instance describing the
-        error that occurred. Usually this will be a ``DistributionNotFound`` or
-        ``VersionConflict`` instance.
-        """
-
-        plugin_projects = list(plugin_env)
-        # scan project names in alphabetic order
-        plugin_projects.sort()
-
-        error_info = {}
-        distributions = {}
-
-        if full_env is None:
-            env = Environment(self.entries)
-            env += plugin_env
-        else:
-            env = full_env + plugin_env
-
-        shadow_set = self.__class__([])
-        # put all our entries in shadow_set
-        list(map(shadow_set.add, self))
-
-        for project_name in plugin_projects:
-
-            for dist in plugin_env[project_name]:
-
-                req = [dist.as_requirement()]
-
-                try:
-                    resolvees = shadow_set.resolve(req, env, installer)
-
-                except ResolutionError as v:
-                    # save error info
-                    error_info[dist] = v
-                    if fallback:
-                        # try the next older version of project
-                        continue
-                    else:
-                        # give up on this project, keep going
-                        break
-
-                else:
-                    list(map(shadow_set.add, resolvees))
-                    distributions.update(dict.fromkeys(resolvees))
-
-                    # success, no need to try any more versions of this project
-                    break
-
-        distributions = list(distributions)
-        distributions.sort()
-
-        return distributions, error_info
-
-    def require(self, *requirements):
-        """Ensure that distributions matching `requirements` are activated
-
-        `requirements` must be a string or a (possibly-nested) sequence
-        thereof, specifying the distributions and versions required.  The
-        return value is a sequence of the distributions that needed to be
-        activated to fulfill the requirements; all relevant distributions are
-        included, even if they were already activated in this working set.
-        """
-        needed = self.resolve(parse_requirements(requirements))
-
-        for dist in needed:
-            self.add(dist)
-
-        return needed
-
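A minimal example of ``require()`` in action, again using 'setuptools' as a
requirement that is guaranteed to resolve:

    import pkg_resources

    # Resolve and activate a distribution plus its dependencies for this
    # process, then report what was needed.
    for dist in pkg_resources.require('setuptools'):
        print(dist.project_name, dist.version)
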
-    def subscribe(self, callback, existing=True):
-        """Invoke `callback` for all distributions
-
-        If `existing=True` (the default), `callback` is also invoked for
-        all distributions already in the set.
-        """
-        if callback in self.callbacks:
-            return
-        self.callbacks.append(callback)
-        if not existing:
-            return
-        for dist in self:
-            callback(dist)
-
-    def _added_new(self, dist):
-        for callback in self.callbacks:
-            callback(dist)
-
-    def __getstate__(self):
-        return (
-            self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
-            self.normalized_to_canonical_keys.copy(), self.callbacks[:]
-        )
-
-    def __setstate__(self, e_k_b_n_c):
-        entries, keys, by_key, normalized_to_canonical_keys, callbacks = e_k_b_n_c
-        self.entries = entries[:]
-        self.entry_keys = keys.copy()
-        self.by_key = by_key.copy()
-        self.normalized_to_canonical_keys = normalized_to_canonical_keys.copy()
-        self.callbacks = callbacks[:]
-
-
-class _ReqExtras(dict):
-    """
-    Map each requirement to the extras that demanded it.
-    """
-
-    def markers_pass(self, req, extras=None):
-        """
-        Evaluate markers for req against each extra that
-        demanded it.
-
-        Return False if the req has a marker and fails
-        evaluation. Otherwise, return True.
-        """
-        extra_evals = (
-            req.marker.evaluate({'extra': extra})
-            for extra in self.get(req, ()) + (extras or (None,))
-        )
-        return not req.marker or any(extra_evals)
-
-
-class Environment:
-    """Searchable snapshot of distributions on a search path"""
-
-    def __init__(
-            self, search_path=None, platform=get_supported_platform(),
-            python=PY_MAJOR):
-        """Snapshot distributions available on a search path
-
-        Any distributions found on `search_path` are added to the environment.
-        `search_path` should be a sequence of ``sys.path`` items.  If not
-        supplied, ``sys.path`` is used.
-
-        `platform` is an optional string specifying the name of the platform
-        that platform-specific distributions must be compatible with.  If
-        unspecified, it defaults to the current platform.  `python` is an
-        optional string naming the desired version of Python (e.g. ``'3.6'``);
-        it defaults to the current version.
-
-        You may explicitly set `platform` (and/or `python`) to ``None`` if you
-        wish to map *all* distributions, not just those compatible with the
-        running platform or Python version.
-        """
-        self._distmap = {}
-        self.platform = platform
-        self.python = python
-        self.scan(search_path)
-
-    def can_add(self, dist):
-        """Is distribution `dist` acceptable for this environment?
-
-        The distribution must match the platform and python version
-        requirements specified when this environment was created, or False
-        is returned.
-        """
-        py_compat = (
-            self.python is None
-            or dist.py_version is None
-            or dist.py_version == self.python
-        )
-        return py_compat and compatible_platforms(dist.platform, self.platform)
-
-    def remove(self, dist):
-        """Remove `dist` from the environment"""
-        self._distmap[dist.key].remove(dist)
-
-    def scan(self, search_path=None):
-        """Scan `search_path` for distributions usable in this environment
-
-        Any distributions found are added to the environment.
-        `search_path` should be a sequence of ``sys.path`` items.  If not
-        supplied, ``sys.path`` is used.  Only distributions conforming to
-        the platform/python version defined at initialization are added.
-        """
-        if search_path is None:
-            search_path = sys.path
-
-        for item in search_path:
-            for dist in find_distributions(item):
-                self.add(dist)
-
-    def __getitem__(self, project_name):
-        """Return a newest-to-oldest list of distributions for `project_name`
-
-        Uses case-insensitive `project_name` comparison, assuming all the
-        project's distributions use their project's name converted to all
-        lowercase as their key.
-
-        """
-        distribution_key = project_name.lower()
-        return self._distmap.get(distribution_key, [])
-
-    def add(self, dist):
-        """Add `dist` if we ``can_add()`` it and it has not already been added
-        """
-        if self.can_add(dist) and dist.has_version():
-            dists = self._distmap.setdefault(dist.key, [])
-            if dist not in dists:
-                dists.append(dist)
-                dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
-
-    def best_match(
-            self, req, working_set, installer=None, replace_conflicting=False):
-        """Find distribution best matching `req` and usable on `working_set`
-
-        This calls the ``find(req)`` method of the `working_set` to see if a
-        suitable distribution is already active.  (This may raise
-        ``VersionConflict`` if an unsuitable version of the project is already
-        active in the specified `working_set`.)  If a suitable distribution
-        isn't active, this method returns the newest distribution in the
-        environment that meets the ``Requirement`` in `req`.  If no suitable
-        distribution is found, and `installer` is supplied, then the result of
-        calling the environment's ``obtain(req, installer)`` method will be
-        returned.
-        """
-        try:
-            dist = working_set.find(req)
-        except VersionConflict:
-            if not replace_conflicting:
-                raise
-            dist = None
-        if dist is not None:
-            return dist
-        for dist in self[req.key]:
-            if dist in req:
-                return dist
-        # try to download/install
-        return self.obtain(req, installer)
-
-    def obtain(self, requirement, installer=None):
-        """Obtain a distribution matching `requirement` (e.g. via download)
-
-        Obtain a distro that matches requirement (e.g. via download).  In the
-        base ``Environment`` class, this routine just returns
-        ``installer(requirement)``, unless `installer` is None, in which case
-        None is returned instead.  This method is a hook that allows subclasses
-        to attempt other ways of obtaining a distribution before falling back
-        to the `installer` argument."""
-        if installer is not None:
-            return installer(requirement)
-
-    def __iter__(self):
-        """Yield the unique project names of the available distributions"""
-        for key in self._distmap.keys():
-            if self[key]:
-                yield key
-
-    def __iadd__(self, other):
-        """In-place addition of a distribution or environment"""
-        if isinstance(other, Distribution):
-            self.add(other)
-        elif isinstance(other, Environment):
-            for project in other:
-                for dist in other[project]:
-                    self.add(dist)
-        else:
-            raise TypeError("Can't add %r to environment" % (other,))
-        return self
-
-    def __add__(self, other):
-        """Add an environment or distribution to an environment"""
-        new = self.__class__([], platform=None, python=None)
-        for env in self, other:
-            new += env
-        return new
-
-
-# XXX backward compatibility
-AvailableDistributions = Environment
-
-
-class ExtractionError(RuntimeError):
-    """An error occurred extracting a resource
-
-    The following attributes are available from instances of this exception:
-
-    manager
-        The resource manager that raised this exception
-
-    cache_path
-        The base directory for resource extraction
-
-    original_error
-        The exception instance that caused extraction to fail
-    """
-
-
-class ResourceManager:
-    """Manage resource extraction and packages"""
-    extraction_path = None
-
-    def __init__(self):
-        self.cached_files = {}
-
-    def resource_exists(self, package_or_requirement, resource_name):
-        """Does the named resource exist?"""
-        return get_provider(package_or_requirement).has_resource(resource_name)
-
-    def resource_isdir(self, package_or_requirement, resource_name):
-        """Is the named resource an existing directory?"""
-        return get_provider(package_or_requirement).resource_isdir(
-            resource_name
-        )
-
-    def resource_filename(self, package_or_requirement, resource_name):
-        """Return a true filesystem path for specified resource"""
-        return get_provider(package_or_requirement).get_resource_filename(
-            self, resource_name
-        )
-
-    def resource_stream(self, package_or_requirement, resource_name):
-        """Return a readable file-like object for specified resource"""
-        return get_provider(package_or_requirement).get_resource_stream(
-            self, resource_name
-        )
-
-    def resource_string(self, package_or_requirement, resource_name):
-        """Return specified resource as a string"""
-        return get_provider(package_or_requirement).get_resource_string(
-            self, resource_name
-        )
-
-    def resource_listdir(self, package_or_requirement, resource_name):
-        """List the contents of the named resource directory"""
-        return get_provider(package_or_requirement).resource_listdir(
-            resource_name
-        )
-
-    def extraction_error(self):
-        """Give an error message for problems extracting file(s)"""
-
-        old_exc = sys.exc_info()[1]
-        cache_path = self.extraction_path or get_default_cache()
-
-        tmpl = textwrap.dedent("""
-            Can't extract file(s) to egg cache
-
-            The following error occurred while trying to extract file(s)
-            to the Python egg cache:
-
-              {old_exc}
-
-            The Python egg cache directory is currently set to:
-
-              {cache_path}
-
-            Perhaps your account does not have write access to this directory?
-            You can change the cache directory by setting the PYTHON_EGG_CACHE
-            environment variable to point to an accessible directory.
-            """).lstrip()
-        err = ExtractionError(tmpl.format(**locals()))
-        err.manager = self
-        err.cache_path = cache_path
-        err.original_error = old_exc
-        raise err
-
-    def get_cache_path(self, archive_name, names=()):
-        """Return absolute location in cache for `archive_name` and `names`
-
-        The parent directory of the resulting path will be created if it does
-        not already exist.  `archive_name` should be the base filename of the
-        enclosing egg (which may not be the name of the enclosing zipfile!),
-        including its ".egg" extension.  `names`, if provided, should be a
-        sequence of path name parts "under" the egg's extraction location.
-
-        This method should only be called by resource providers that need to
-        obtain an extraction location, and only for names they intend to
-        extract, as it tracks the generated names for possible cleanup later.
-        """
-        extract_path = self.extraction_path or get_default_cache()
-        target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
-        try:
-            _bypass_ensure_directory(target_path)
-        except Exception:
-            self.extraction_error()
-
-        self._warn_unsafe_extraction_path(extract_path)
-
-        self.cached_files[target_path] = 1
-        return target_path
-
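-    # Illustrative sketch of the contract above (hypothetical egg name;
-    # the resulting cache path is abbreviated):
-    #
-    #     >>> mgr = ResourceManager()
-    #     >>> mgr.get_cache_path('Example-1.0-py3.10.egg', ('data', 'a.txt'))
-    #     '.../Example-1.0-py3.10.egg-tmp/data/a.txt'
-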
-    @staticmethod
-    def _warn_unsafe_extraction_path(path):
-        """
-        If the default extraction path is overridden and set to an insecure
-        location, such as /tmp, it opens up an opportunity for an attacker to
-        replace an extracted file with an unauthorized payload. Warn the user
-        if a known insecure location is used.
-
-        See Distribute #375 for more details.
-        """
-        if os.name == 'nt' and not path.startswith(os.environ['windir']):
-            # On Windows, permissions are generally restrictive by default
-            #  and temp directories are not writable by other users, so
-            #  bypass the warning.
-            return
-        mode = os.stat(path).st_mode
-        if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
-            msg = (
-                "Extraction path is writable by group/others "
-                "and vulnerable to attack when "
-                "used with get_resource_filename ({path}). "
-                "Consider a more secure "
-                "location (set with .set_extraction_path or the "
-                "PYTHON_EGG_CACHE environment variable)."
-            ).format(**locals())
-            warnings.warn(msg, UserWarning)
-
-    def postprocess(self, tempname, filename):
-        """Perform any platform-specific postprocessing of `tempname`
-
-        This is where Mac header rewrites should be done; other platforms
-        have nothing special to do.
-
-        Resource providers should call this method ONLY after successfully
-        extracting a compressed resource.  They must NOT call it on resources
-        that are already in the filesystem.
-
-        `tempname` is the current (temporary) name of the file, and `filename`
-        is the name it will be renamed to by the caller after this routine
-        returns.
-        """
-
-        if os.name == 'posix':
-            # Make the resource executable
-            mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
-            os.chmod(tempname, mode)
-
-    def set_extraction_path(self, path):
-        """Set the base path where resources will be extracted to, if needed.
-
-        If you do not call this routine before any extractions take place, the
-        path defaults to the return value of ``get_default_cache()``.  (Which
-        is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
-        platform-specific fallbacks.  See that routine's documentation for more
-        details.)
-
-        Resources are extracted to subdirectories of this path based upon
-        information given by the ``IResourceProvider``.  You may set this to a
-        temporary directory, but then you must call ``cleanup_resources()`` to
-        delete the extracted files when done.  There is no guarantee that
-        ``cleanup_resources()`` will be able to remove all extracted files.
-
-        (Note: you may not change the extraction path for a given resource
-        manager once resources have been extracted, unless you first call
-        ``cleanup_resources()``.)
-        """
-        if self.cached_files:
-            raise ValueError(
-                "Can't change extraction path, files already extracted"
-            )
-
-        self.extraction_path = path
-
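-    # Illustrative usage sketch following the docstring above: extract into
-    # a temporary directory and register cleanup at interpreter exit.
-    #
-    #     >>> import atexit, tempfile
-    #     >>> mgr = ResourceManager()
-    #     >>> mgr.set_extraction_path(tempfile.mkdtemp())
-    #     >>> _ = atexit.register(mgr.cleanup_resources)
-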
-    def cleanup_resources(self, force=False):
-        """
-        Delete all extracted resource files and directories, returning a list
-        of the file and directory names that could not be successfully removed.
-        This function does not have any concurrency protection, so it should
-        generally only be called when the extraction path is a temporary
-        directory exclusive to a single process.  This method is not
-        automatically called; you must call it explicitly or register it as an
-        ``atexit`` function if you wish to ensure cleanup of a temporary
-        directory used for extractions.
-        """
-        # XXX not implemented; nothing is actually deleted yet
-
-
-def get_default_cache():
-    """
-    Return the ``PYTHON_EGG_CACHE`` environment variable
-    or a platform-relevant user cache dir for an app
-    named "Python-Eggs".
-    """
-    return (
-        os.environ.get('PYTHON_EGG_CACHE')
-        or appdirs.user_cache_dir(appname='Python-Eggs')
-    )
-
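-# Illustrative sketch: ``PYTHON_EGG_CACHE`` takes precedence over the
-# platform cache directory (the path below is hypothetical).
-#
-#     >>> import os
-#     >>> os.environ['PYTHON_EGG_CACHE'] = '/tmp/egg-cache'
-#     >>> get_default_cache()
-#     '/tmp/egg-cache'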
-
-def safe_name(name):
-    """Convert an arbitrary string to a standard distribution name
-
-    Any runs of non-alphanumeric/. characters are replaced with a single '-'.
-    """
-    return re.sub('[^A-Za-z0-9.]+', '-', name)
-
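-# Illustrative examples of the substitution above:
-#
-#     >>> safe_name('hello_world')
-#     'hello-world'
-#     >>> safe_name('peak.utils')
-#     'peak.utils'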
-
-def safe_version(version):
-    """
-    Convert an arbitrary string to a standard version string
-    """
-    try:
-        # normalize the version
-        return str(packaging.version.Version(version))
-    except packaging.version.InvalidVersion:
-        version = version.replace(' ', '.')
-        return re.sub('[^A-Za-z0-9.]+', '-', version)
-
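-# Illustrative examples: valid versions are normalized by
-# packaging.version.Version; invalid ones fall back to substitution.
-#
-#     >>> safe_version('2.7.0')
-#     '2.7.0'
-#     >>> safe_version('1.0 alpha build')
-#     '1.0.alpha.build'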
-
-def safe_extra(extra):
-    """Convert an arbitrary string to a standard 'extra' name
-
-    Any runs of characters other than letters, digits, '.', and '-' are
-    replaced with a single '_', and the result is always lowercased.
-    """
-    return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower()
-
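-# Illustrative example (note that '.' and '-' are preserved):
-#
-#     >>> safe_extra('Tests & Docs')
-#     'tests_docs'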
-
-def to_filename(name):
-    """Convert a project or version name to its filename-escaped form
-
-    Any '-' characters are currently replaced with '_'.
-    """
-    return name.replace('-', '_')
-
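-# Illustrative example:
-#
-#     >>> to_filename('my-project')
-#     'my_project'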
-
-def invalid_marker(text):
-    """
-    Validate text as a PEP 508 environment marker; return the exception
-    instance if invalid, or False otherwise.
-    """
-    try:
-        evaluate_marker(text)
-    except SyntaxError as e:
-        e.filename = None
-        e.lineno = None
-        return e
-    return False
-
-
-def evaluate_marker(text, extra=None):
-    """
-    Evaluate a PEP 508 environment marker.
-    Return a boolean indicating the marker result in this environment.
-    Raise SyntaxError if marker is invalid.
-
-    This implementation uses the 'packaging.markers' module.
-    """
-    try:
-        marker = packaging.markers.Marker(text)
-        return marker.evaluate()
-    except packaging.markers.InvalidMarker as e:
-        raise SyntaxError(e) from e
-
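-# Illustrative examples: a marker evaluated against the running
-# interpreter (the first result assumes a Python 3 environment):
-#
-#     >>> evaluate_marker('python_version > "2.7"')
-#     True
-#     >>> bool(invalid_marker('python_version >'))
-#     True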
-
-class NullProvider:
-    """Try to implement resources and metadata for arbitrary PEP 302 loaders"""
-
-    egg_name = None
-    egg_info = None
-    loader = None
-
-    def __init__(self, module):
-        self.loader = getattr(module, '__loader__', None)
-        self.module_path = os.path.dirname(getattr(module, '__file__', ''))
-
-    def get_resource_filename(self, manager, resource_name):
-        return self._fn(self.module_path, resource_name)
-
-    def get_resource_stream(self, manager, resource_name):
-        return io.BytesIO(self.get_resource_string(manager, resource_name))
-
-    def get_resource_string(self, manager, resource_name):
-        return self._get(self._fn(self.module_path, resource_name))
-
-    def has_resource(self, resource_name):
-        return self._has(self._fn(self.module_path, resource_name))
-
-    def _get_metadata_path(self, name):
-        return self._fn(self.egg_info, name)
-
-    def has_metadata(self, name):
-        if not self.egg_info:
-            return self.egg_info
-
-        path = self._get_metadata_path(name)
-        return self._has(path)
-
-    def get_metadata(self, name):
-        if not self.egg_info:
-            return ""
-        path = self._get_metadata_path(name)
-        value = self._get(path)
-        try:
-            return value.decode('utf-8')
-        except UnicodeDecodeError as exc:
-            # Include the path in the error message to simplify
-            # troubleshooting, and without changing the exception type.
-            exc.reason += ' in {} file at path: {}'.format(name, path)
-            raise
-
-    def get_metadata_lines(self, name):
-        return yield_lines(self.get_metadata(name))
-
-    def resource_isdir(self, resource_name):
-        return self._isdir(self._fn(self.module_path, resource_name))
-
-    def metadata_isdir(self, name):
-        return self.egg_info and self._isdir(self._fn(self.egg_info, name))
-
-    def resource_listdir(self, resource_name):
-        return self._listdir(self._fn(self.module_path, resource_name))
-
-    def metadata_listdir(self, name):
-        if self.egg_info:
-            return self._listdir(self._fn(self.egg_info, name))
-        return []
-
-    def run_script(self, script_name, namespace):
-        script = 'scripts/' + script_name
-        if not self.has_metadata(script):
-            raise ResolutionError(
-                "Script {script!r} not found in metadata at {self.egg_info!r}"
-                .format(**locals()),
-            )
-        script_text = self.get_metadata(script).replace('\r\n', '\n')
-        script_text = script_text.replace('\r', '\n')
-        script_filename = self._fn(self.egg_info, script)
-        namespace['__file__'] = script_filename
-        if os.path.exists(script_filename):
-            with open(script_filename) as fid:
-                source = fid.read()
-            code = compile(source, script_filename, 'exec')
-            exec(code, namespace, namespace)
-        else:
-            from linecache import cache
-            cache[script_filename] = (
-                len(script_text), 0, script_text.split('\n'), script_filename
-            )
-            script_code = compile(script_text, script_filename, 'exec')
-            exec(script_code, namespace, namespace)
-
-    def _has(self, path):
-        raise NotImplementedError(
-            "Can't perform this operation for unregistered loader type"
-        )
-
-    def _isdir(self, path):
-        raise NotImplementedError(
-            "Can't perform this operation for unregistered loader type"
-        )
-
-    def _listdir(self, path):
-        raise NotImplementedError(
-            "Can't perform this operation for unregistered loader type"
-        )
-
-    def _fn(self, base, resource_name):
-        self._validate_resource_path(resource_name)
-        if resource_name:
-            return os.path.join(base, *resource_name.split('/'))
-        return base
-
-    @staticmethod
-    def _validate_resource_path(path):
-        """
-        Validate the resource paths according to the docs.
-        https://setuptools.pypa.io/en/latest/pkg_resources.html#basic-resource-access
-
-        >>> warned = getfixture('recwarn')
-        >>> warnings.simplefilter('always')
-        >>> vrp = NullProvider._validate_resource_path
-        >>> vrp('foo/bar.txt')
-        >>> bool(warned)
-        False
-        >>> vrp('../foo/bar.txt')
-        >>> bool(warned)
-        True
-        >>> warned.clear()
-        >>> vrp('/foo/bar.txt')
-        >>> bool(warned)
-        True
-        >>> vrp('foo/../../bar.txt')
-        >>> bool(warned)
-        True
-        >>> warned.clear()
-        >>> vrp('foo/f../bar.txt')
-        >>> bool(warned)
-        False
-
-        Windows path separators are straight-up disallowed.
-        >>> vrp(r'\\foo/bar.txt')
-        Traceback (most recent call last):
-        ...
-        ValueError: Use of .. or absolute path in a resource path \
-is not allowed.
-
-        >>> vrp(r'C:\\foo/bar.txt')
-        Traceback (most recent call last):
-        ...
-        ValueError: Use of .. or absolute path in a resource path \
-is not allowed.
-
-        Blank values are allowed
-
-        >>> vrp('')
-        >>> bool(warned)
-        False
-
-        Non-string values are not.
-
-        >>> vrp(None)
-        Traceback (most recent call last):
-        ...
-        AttributeError: ...
-        """
-        invalid = (
-            os.path.pardir in path.split(posixpath.sep) or
-            posixpath.isabs(path) or
-            ntpath.isabs(path)
-        )
-        if not invalid:
-            return
-
-        msg = "Use of .. or absolute path in a resource path is not allowed."
-
-        # Aggressively disallow Windows absolute paths
-        if ntpath.isabs(path) and not posixpath.isabs(path):
-            raise ValueError(msg)
-
-        # for compatibility, warn; in future
-        # raise ValueError(msg)
-        warnings.warn(
-            msg[:-1] + " and will raise exceptions in a future release.",
-            DeprecationWarning,
-            stacklevel=4,
-        )
-
-    def _get(self, path):
-        if hasattr(self.loader, 'get_data'):
-            return self.loader.get_data(path)
-        raise NotImplementedError(
-            "Can't perform this operation for loaders without 'get_data()'"
-        )
-
-
-register_loader_type(object, NullProvider)
-
-
-def _parents(path):
-    """
-    yield all parents of path including path
-    """
-    last = None
-    while path != last:
-        yield path
-        last = path
-        path, _ = os.path.split(path)
-
-
-class EggProvider(NullProvider):
-    """Provider based on a virtual filesystem"""
-
-    def __init__(self, module):
-        super().__init__(module)
-        self._setup_prefix()
-
-    def _setup_prefix(self):
-        # Assume that metadata may be nested inside a "basket"
-        # of multiple eggs and use module_path instead of .archive.
-        eggs = filter(_is_egg_path, _parents(self.module_path))
-        egg = next(eggs, None)
-        if egg:
-            self._set_egg(egg)
-
-    def _set_egg(self, path):
-        self.egg_name = os.path.basename(path)
-        self.egg_info = os.path.join(path, 'EGG-INFO')
-        self.egg_root = path
-
-
-class DefaultProvider(EggProvider):
-    """Provides access to package resources in the filesystem"""
-
-    def _has(self, path):
-        return os.path.exists(path)
-
-    def _isdir(self, path):
-        return os.path.isdir(path)
-
-    def _listdir(self, path):
-        return os.listdir(path)
-
-    def get_resource_stream(self, manager, resource_name):
-        return open(self._fn(self.module_path, resource_name), 'rb')
-
-    def _get(self, path):
-        with open(path, 'rb') as stream:
-            return stream.read()
-
-    @classmethod
-    def _register(cls):
-        loader_names = 'SourceFileLoader', 'SourcelessFileLoader',
-        for name in loader_names:
-            loader_cls = getattr(importlib_machinery, name, type(None))
-            register_loader_type(loader_cls, cls)
-
-
-DefaultProvider._register()
-
-
-class EmptyProvider(NullProvider):
-    """Provider that returns nothing for all requests"""
-
-    module_path = None
-
-    _isdir = _has = lambda self, path: False
-
-    def _get(self, path):
-        return ''
-
-    def _listdir(self, path):
-        return []
-
-    def __init__(self):
-        pass
-
-
-empty_provider = EmptyProvider()
-
-
-class ZipManifests(dict):
-    """
-    zip manifest builder
-    """
-
-    @classmethod
-    def build(cls, path):
-        """
-        Build a dictionary similar to the zipimport directory
-        caches, except instead of tuples, store ZipInfo objects.
-
-        Use a platform-specific path separator (os.sep) for the path keys
-        for compatibility with pypy on Windows.
-        """
-        with zipfile.ZipFile(path) as zfile:
-            items = (
-                (
-                    name.replace('/', os.sep),
-                    zfile.getinfo(name),
-                )
-                for name in zfile.namelist()
-            )
-            return dict(items)
-
-    load = build
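-
-    # Illustrative sketch (hypothetical archive; key separator shown for
-    # POSIX, where os.sep is '/'):
-    #
-    #     >>> manifest = ZipManifests.build('Example-1.0-py3.10.egg')
-    #     >>> type(manifest['EGG-INFO/PKG-INFO']).__name__
-    #     'ZipInfo'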
-
-
-class MemoizedZipManifests(ZipManifests):
-    """
-    Memoized zipfile manifests.
-    """
-    manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
-
-    def load(self, path):
-        """
-        Load a manifest at path or return a suitable manifest already loaded.
-        """
-        path = os.path.normpath(path)
-        mtime = os.stat(path).st_mtime
-
-        if path not in self or self[path].mtime != mtime:
-            manifest = self.build(path)
-            self[path] = self.manifest_mod(manifest, mtime)
-
-        return self[path].manifest
-
-
-class ZipProvider(EggProvider):
-    """Resource support for zips and eggs"""
-
-    eagers = None
-    _zip_manifests = MemoizedZipManifests()
-
-    def __init__(self, module):
-        super().__init__(module)
-        self.zip_pre = self.loader.archive + os.sep
-
-    def _zipinfo_name(self, fspath):
-        # Convert a virtual filename (full path to file) into a zipfile subpath
-        # usable with the zipimport directory cache for our target archive
-        fspath = fspath.rstrip(os.sep)
-        if fspath == self.loader.archive:
-            return ''
-        if fspath.startswith(self.zip_pre):
-            return fspath[len(self.zip_pre):]
-        raise AssertionError(
-            "%s is not a subpath of %s" % (fspath, self.zip_pre)
-        )
-
-    def _parts(self, zip_path):
-        # Convert a zipfile subpath into an egg-relative path part list.
-        # pseudo-fs path
-        fspath = self.zip_pre + zip_path
-        if fspath.startswith(self.egg_root + os.sep):
-            return fspath[len(self.egg_root) + 1:].split(os.sep)
-        raise AssertionError(
-            "%s is not a subpath of %s" % (fspath, self.egg_root)
-        )
-
-    @property
-    def zipinfo(self):
-        return self._zip_manifests.load(self.loader.archive)
-
-    def get_resource_filename(self, manager, resource_name):
-        if not self.egg_name:
-            raise NotImplementedError(
-                "resource_filename() only supported for .egg, not .zip"
-            )
-        # no need to lock for extraction, since we use temp names
-        zip_path = self._resource_to_zip(resource_name)
-        eagers = self._get_eager_resources()
-        if '/'.join(self._parts(zip_path)) in eagers:
-            for name in eagers:
-                self._extract_resource(manager, self._eager_to_zip(name))
-        return self._extract_resource(manager, zip_path)
-
-    @staticmethod
-    def _get_date_and_size(zip_stat):
-        size = zip_stat.file_size
-        # pad the (y, m, d, h, m, s) tuple from the zip entry with
-        # (wday, yday, dst) placeholders to suit time.mktime()
-        date_time = zip_stat.date_time + (0, 0, -1)
-        # the zip epoch (1980) offset is already accounted for here
-        timestamp = time.mktime(date_time)
-        return timestamp, size
-
-    # FIXME: 'ZipProvider._extract_resource' is too complex (12)
-    def _extract_resource(self, manager, zip_path):  # noqa: C901
-
-        if zip_path in self._index():
-            for name in self._index()[zip_path]:
-                last = self._extract_resource(
-                    manager, os.path.join(zip_path, name)
-                )
-            # return the extracted directory name
-            return os.path.dirname(last)
-
-        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
-
-        if not WRITE_SUPPORT:
-            raise IOError('"os.rename" and "os.unlink" are not supported '
-                          'on this platform')
-        try:
-
-            real_path = manager.get_cache_path(
-                self.egg_name, self._parts(zip_path)
-            )
-
-            if self._is_current(real_path, zip_path):
-                return real_path
-
-            outf, tmpnam = _mkstemp(
-                ".$extract",
-                dir=os.path.dirname(real_path),
-            )
-            os.write(outf, self.loader.get_data(zip_path))
-            os.close(outf)
-            utime(tmpnam, (timestamp, timestamp))
-            manager.postprocess(tmpnam, real_path)
-
-            try:
-                rename(tmpnam, real_path)
-
-            except os.error:
-                if os.path.isfile(real_path):
-                    if self._is_current(real_path, zip_path):
-                        # the file became current since it was checked above,
-                        #  so proceed.
-                        return real_path
-                    # on Windows, delete the old file and retry
-                    elif os.name == 'nt':
-                        unlink(real_path)
-                        rename(tmpnam, real_path)
-                        return real_path
-                raise
-
-        except os.error:
-            # report a user-friendly error
-            manager.extraction_error()
-
-        return real_path
-
-    def _is_current(self, file_path, zip_path):
-        """
-        Return True if the file_path is current for this zip_path
-        """
-        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
-        if not os.path.isfile(file_path):
-            return False
-        stat = os.stat(file_path)
-        if stat.st_size != size or stat.st_mtime != timestamp:
-            return False
-        # check that the contents match
-        zip_contents = self.loader.get_data(zip_path)
-        with open(file_path, 'rb') as f:
-            file_contents = f.read()
-        return zip_contents == file_contents
-
-    def _get_eager_resources(self):
-        if self.eagers is None:
-            eagers = []
-            for name in ('native_libs.txt', 'eager_resources.txt'):
-                if self.has_metadata(name):
-                    eagers.extend(self.get_metadata_lines(name))
-            self.eagers = eagers
-        return self.eagers
-
-    def _index(self):
-        try:
-            return self._dirindex
-        except AttributeError:
-            ind = {}
-            for path in self.zipinfo:
-                parts = path.split(os.sep)
-                while parts:
-                    parent = os.sep.join(parts[:-1])
-                    if parent in ind:
-                        ind[parent].append(parts[-1])
-                        break
-                    else:
-                        ind[parent] = [parts.pop()]
-            self._dirindex = ind
-            return ind
-
-    def _has(self, fspath):
-        zip_path = self._zipinfo_name(fspath)
-        return zip_path in self.zipinfo or zip_path in self._index()
-
-    def _isdir(self, fspath):
-        return self._zipinfo_name(fspath) in self._index()
-
-    def _listdir(self, fspath):
-        return list(self._index().get(self._zipinfo_name(fspath), ()))
-
-    def _eager_to_zip(self, resource_name):
-        return self._zipinfo_name(self._fn(self.egg_root, resource_name))
-
-    def _resource_to_zip(self, resource_name):
-        return self._zipinfo_name(self._fn(self.module_path, resource_name))
-
-
-register_loader_type(zipimport.zipimporter, ZipProvider)
-
-
-class FileMetadata(EmptyProvider):
-    """Metadata handler for standalone PKG-INFO files
-
-    Usage::
-
-        metadata = FileMetadata("/path/to/PKG-INFO")
-
-    This provider rejects all data and metadata requests except for PKG-INFO,
-    which is treated as existing, and will be the contents of the file at
-    the provided location.
-    """
-
-    def __init__(self, path):
-        self.path = path
-
-    def _get_metadata_path(self, name):
-        return self.path
-
-    def has_metadata(self, name):
-        return name == 'PKG-INFO' and os.path.isfile(self.path)
-
-    def get_metadata(self, name):
-        if name != 'PKG-INFO':
-            raise KeyError("No metadata except PKG-INFO is available")
-
-        with io.open(self.path, encoding='utf-8', errors="replace") as f:
-            metadata = f.read()
-        self._warn_on_replacement(metadata)
-        return metadata
-
-    def _warn_on_replacement(self, metadata):
-        replacement_char = '�'
-        if replacement_char in metadata:
-            tmpl = "{self.path} could not be properly decoded as UTF-8"
-            msg = tmpl.format(**locals())
-            warnings.warn(msg)
-
-    def get_metadata_lines(self, name):
-        return yield_lines(self.get_metadata(name))
-
-
-class PathMetadata(DefaultProvider):
-    """Metadata provider for egg directories
-
-    Usage::
-
-        # Development eggs:
-
-        egg_info = "/path/to/PackageName.egg-info"
-        base_dir = os.path.dirname(egg_info)
-        metadata = PathMetadata(base_dir, egg_info)
-        dist_name = os.path.splitext(os.path.basename(egg_info))[0]
-        dist = Distribution(basedir, project_name=dist_name, metadata=metadata)
-
-        # Unpacked egg directories:
-
-        egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
-        metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
-        dist = Distribution.from_filename(egg_path, metadata=metadata)
-    """
-
-    def __init__(self, path, egg_info):
-        self.module_path = path
-        self.egg_info = egg_info
-
-
-class EggMetadata(ZipProvider):
-    """Metadata provider for .egg files"""
-
-    def __init__(self, importer):
-        """Create a metadata provider from a zipimporter"""
-
-        self.zip_pre = importer.archive + os.sep
-        self.loader = importer
-        if importer.prefix:
-            self.module_path = os.path.join(importer.archive, importer.prefix)
-        else:
-            self.module_path = importer.archive
-        self._setup_prefix()
-
-
-_declare_state('dict', _distribution_finders={})
-
-
-def register_finder(importer_type, distribution_finder):
-    """Register `distribution_finder` to find distributions in sys.path items
-
-    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
-    handler), and `distribution_finder` is a callable that, passed a path
-    item and the importer instance, yields ``Distribution`` instances found on
-    that path item.  See ``pkg_resources.find_on_path`` for an example."""
-    _distribution_finders[importer_type] = distribution_finder
-
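-# Illustrative sketch of the documented finder contract; the importer type
-# and the yielded distribution are hypothetical (``find_eggs_in_zip`` and
-# ``find_on_path`` below are the real registered finders).
-#
-#     def my_finder(importer, path_item, only=False):
-#         yield Distribution.from_filename(path_item)
-#
-#     register_finder(MyImporter, my_finder)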
-
-def find_distributions(path_item, only=False):
-    """Yield distributions accessible via `path_item`"""
-    importer = get_importer(path_item)
-    finder = _find_adapter(_distribution_finders, importer)
-    return finder(importer, path_item, only)
-
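-# Illustrative usage sketch (hypothetical directory):
-#
-#     >>> for dist in find_distributions('/path/to/site-packages'):
-#     ...     print(dist.project_name, dist.version)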
-
-def find_eggs_in_zip(importer, path_item, only=False):
-    """
-    Find eggs in zip files; possibly multiple nested eggs.
-    """
-    if importer.archive.endswith('.whl'):
-        # wheels are not supported with this finder
-        # they don't have PKG-INFO metadata, and won't ever contain eggs
-        return
-    metadata = EggMetadata(importer)
-    if metadata.has_metadata('PKG-INFO'):
-        yield Distribution.from_filename(path_item, metadata=metadata)
-    if only:
-        # don't yield nested distros
-        return
-    for subitem in metadata.resource_listdir(''):
-        if _is_egg_path(subitem):
-            subpath = os.path.join(path_item, subitem)
-            dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath)
-            for dist in dists:
-                yield dist
-        elif subitem.lower().endswith(('.dist-info', '.egg-info')):
-            subpath = os.path.join(path_item, subitem)
-            submeta = EggMetadata(zipimport.zipimporter(subpath))
-            submeta.egg_info = subpath
-            yield Distribution.from_location(path_item, subitem, submeta)
-
-
-register_finder(zipimport.zipimporter, find_eggs_in_zip)
-
-
-def find_nothing(importer, path_item, only=False):
-    return ()
-
-
-register_finder(object, find_nothing)
-
-
-def _by_version_descending(names):
-    """
-    Given a list of filenames, return them in descending order
-    by version number.
-
-    >>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg'
-    >>> _by_version_descending(names)
-    ['Python-2.7.10.egg', 'Python-2.7.2.egg', 'bar', 'foo']
-    >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg'
-    >>> _by_version_descending(names)
-    ['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg']
-    >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg'
-    >>> _by_version_descending(names)
-    ['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg']
-    """
-    def try_parse(name):
-        """
-        Attempt to parse as a version or return a null version.
-        """
-        try:
-            return packaging.version.Version(name)
-        except Exception:
-            return packaging.version.Version('0')
-
-    def _by_version(name):
-        """
-        Parse each component of the filename
-        """
-        name, ext = os.path.splitext(name)
-        parts = itertools.chain(name.split('-'), [ext])
-        return [try_parse(part) for part in parts]
-
-    return sorted(names, key=_by_version, reverse=True)
-
-
-def find_on_path(importer, path_item, only=False):
-    """Yield distributions accessible on a sys.path directory"""
-    path_item = _normalize_cached(path_item)
-
-    if _is_unpacked_egg(path_item):
-        yield Distribution.from_filename(
-            path_item, metadata=PathMetadata(
-                path_item, os.path.join(path_item, 'EGG-INFO')
-            )
-        )
-        return
-
-    entries = (
-        os.path.join(path_item, child)
-        for child in safe_listdir(path_item)
-    )
-
-    # for performance, before sorting by version,
-    # screen entries for only those that will yield
-    # distributions
-    filtered = (
-        entry
-        for entry in entries
-        if dist_factory(path_item, entry, only)
-    )
-
-    # scan for .egg and .egg-info in directory
-    path_item_entries = _by_version_descending(filtered)
-    for entry in path_item_entries:
-        fullpath = os.path.join(path_item, entry)
-        factory = dist_factory(path_item, entry, only)
-        for dist in factory(fullpath):
-            yield dist
-
-
-def dist_factory(path_item, entry, only):
-    """Return a dist_factory for the given entry."""
-    lower = entry.lower()
-    is_egg_info = lower.endswith('.egg-info')
-    is_dist_info = (
-        lower.endswith('.dist-info') and
-        os.path.isdir(os.path.join(path_item, entry))
-    )
-    is_meta = is_egg_info or is_dist_info
-    if is_meta:
-        return distributions_from_metadata
-    if not only and _is_egg_path(entry):
-        return find_distributions
-    if not only and lower.endswith('.egg-link'):
-        return resolve_egg_link
-    return NoDists()
-
-
-class NoDists:
-    """
-    >>> bool(NoDists())
-    False
-
-    >>> list(NoDists()('anything'))
-    []
-    """
-    def __bool__(self):
-        return False
-
-    def __call__(self, fullpath):
-        return iter(())
-
-
-def safe_listdir(path):
-    """
-    Attempt to list contents of path, but suppress some exceptions.
-    """
-    try:
-        return os.listdir(path)
-    except (PermissionError, NotADirectoryError):
-        pass
-    except OSError as e:
-        # Ignore the directory if it does not exist, is not a
-        # directory, or if permission is denied
-        if e.errno not in (errno.ENOTDIR, errno.EACCES, errno.ENOENT):
-            raise
-    return ()
-
-
-def distributions_from_metadata(path):
-    root = os.path.dirname(path)
-    if os.path.isdir(path):
-        if len(os.listdir(path)) == 0:
-            # empty metadata dir; skip
-            return
-        metadata = PathMetadata(root, path)
-    else:
-        metadata = FileMetadata(path)
-    entry = os.path.basename(path)
-    yield Distribution.from_location(
-        root, entry, metadata, precedence=DEVELOP_DIST,
-    )
-
-
-def non_empty_lines(path):
-    """
-    Yield non-empty lines from file at path
-    """
-    with open(path) as f:
-        for line in f:
-            line = line.strip()
-            if line:
-                yield line
-
-
-def resolve_egg_link(path):
-    """
-    Given a path to an .egg-link, resolve distributions
-    present in the referenced path.
-    """
-    referenced_paths = non_empty_lines(path)
-    resolved_paths = (
-        os.path.join(os.path.dirname(path), ref)
-        for ref in referenced_paths
-    )
-    dist_groups = map(find_distributions, resolved_paths)
-    return next(dist_groups, ())
-
-
-register_finder(pkgutil.ImpImporter, find_on_path)
-
-if hasattr(importlib_machinery, 'FileFinder'):
-    register_finder(importlib_machinery.FileFinder, find_on_path)
-
-_declare_state('dict', _namespace_handlers={})
-_declare_state('dict', _namespace_packages={})
-
-
-def register_namespace_handler(importer_type, namespace_handler):
-    """Register `namespace_handler` to declare namespace packages
-
-    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
-    handler), and `namespace_handler` is a callable like this::
-
-        def namespace_handler(importer, path_entry, moduleName, module):
-            # return a path_entry to use for child packages
-
-    Namespace handlers are only called if the importer object has already
-    agreed that it can handle the relevant path item, and they should only
-    return a subpath if the module __path__ does not already contain an
-    equivalent subpath.  For an example namespace handler, see
-    ``pkg_resources.file_ns_handler``.
-    """
-    _namespace_handlers[importer_type] = namespace_handler
-
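-# Illustrative sketch of a handler matching the documented signature; the
-# importer type is hypothetical.  Returning None declines to contribute a
-# subpath (compare ``null_ns_handler`` below).
-#
-#     def my_ns_handler(importer, path_entry, moduleName, module):
-#         return None
-#
-#     register_namespace_handler(MyImporter, my_ns_handler)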
-
-def _handle_ns(packageName, path_item):
-    """Ensure that named package includes a subpath of path_item (if needed)"""
-
-    importer = get_importer(path_item)
-    if importer is None:
-        return None
-
-    # use find_spec (PEP 451) and fall back to find_module (PEP 302)
-    try:
-        spec = importer.find_spec(packageName)
-    except AttributeError:
-        # capture warnings due to #1111
-        with warnings.catch_warnings():
-            warnings.simplefilter("ignore")
-            loader = importer.find_module(packageName)
-    else:
-        loader = spec.loader if spec else None
-
-    if loader is None:
-        return None
-    module = sys.modules.get(packageName)
-    if module is None:
-        module = sys.modules[packageName] = types.ModuleType(packageName)
-        module.__path__ = []
-        _set_parent_ns(packageName)
-    elif not hasattr(module, '__path__'):
-        raise TypeError("Not a package:", packageName)
-    handler = _find_adapter(_namespace_handlers, importer)
-    subpath = handler(importer, path_item, packageName, module)
-    if subpath is not None:
-        path = module.__path__
-        path.append(subpath)
-        importlib.import_module(packageName)
-        _rebuild_mod_path(path, packageName, module)
-    return subpath
-
-
-def _rebuild_mod_path(orig_path, package_name, module):
-    """
-    Rebuild module.__path__, ensuring that all entries are ordered to
-    match their position in sys.path
-    """
-    sys_path = [_normalize_cached(p) for p in sys.path]
-
-    def safe_sys_path_index(entry):
-        """
-        Workaround for #520 and #513.
-        """
-        try:
-            return sys_path.index(entry)
-        except ValueError:
-            return float('inf')
-
-    def position_in_sys_path(path):
-        """
-        Return the ordinal of the path based on its position in sys.path
-        """
-        path_parts = path.split(os.sep)
-        module_parts = package_name.count('.') + 1
-        parts = path_parts[:-module_parts]
-        return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))
-
-    new_path = sorted(orig_path, key=position_in_sys_path)
-    new_path = [_normalize_cached(p) for p in new_path]
-
-    if isinstance(module.__path__, list):
-        module.__path__[:] = new_path
-    else:
-        module.__path__ = new_path
-
-
-def declare_namespace(packageName):
-    """Declare that package 'packageName' is a namespace package"""
-
-    _imp.acquire_lock()
-    try:
-        if packageName in _namespace_packages:
-            return
-
-        path = sys.path
-        parent, _, _ = packageName.rpartition('.')
-
-        if parent:
-            declare_namespace(parent)
-            if parent not in _namespace_packages:
-                __import__(parent)
-            try:
-                path = sys.modules[parent].__path__
-            except AttributeError as e:
-                raise TypeError("Not a package:", parent) from e
-
-        # Track what packages are namespaces, so when new path items are added,
-        # they can be updated
-        _namespace_packages.setdefault(parent or None, []).append(packageName)
-        _namespace_packages.setdefault(packageName, [])
-
-        for path_item in path:
-            # Ensure all the parent's path items are reflected in the child,
-            # if they apply
-            _handle_ns(packageName, path_item)
-
-    finally:
-        _imp.release_lock()
-
-
-def fixup_namespace_packages(path_item, parent=None):
-    """Ensure that previously-declared namespace packages include path_item"""
-    _imp.acquire_lock()
-    try:
-        for package in _namespace_packages.get(parent, ()):
-            subpath = _handle_ns(package, path_item)
-            if subpath:
-                fixup_namespace_packages(subpath, package)
-    finally:
-        _imp.release_lock()
-
-
-def file_ns_handler(importer, path_item, packageName, module):
-    """Compute an ns-package subpath for a filesystem or zipfile importer"""
-
-    subpath = os.path.join(path_item, packageName.split('.')[-1])
-    normalized = _normalize_cached(subpath)
-    for item in module.__path__:
-        if _normalize_cached(item) == normalized:
-            break
-    else:
-        # Only return the path if it's not already there
-        return subpath
-
-
-register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
-register_namespace_handler(zipimport.zipimporter, file_ns_handler)
-
-if hasattr(importlib_machinery, 'FileFinder'):
-    register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
-
-
-def null_ns_handler(importer, path_item, packageName, module):
-    return None
-
-
-register_namespace_handler(object, null_ns_handler)
-
-
-def normalize_path(filename):
-    """Normalize a file/dir name for comparison purposes"""
-    return os.path.normcase(os.path.realpath(os.path.normpath(
-        _cygwin_patch(filename))))
-
-
-def _cygwin_patch(filename):  # pragma: nocover
-    """
-    Contrary to POSIX 2008, on Cygwin, getcwd(3) contains
-    symlink components.  Using os.path.abspath() works around
-    this limitation.  A fix in os.getcwd() would probably be
-    better, in Cygwin even more so, except that this seems to
-    be by design...
-    """
-    return os.path.abspath(filename) if sys.platform == 'cygwin' else filename
-
-
-def _normalize_cached(filename, _cache={}):
-    try:
-        return _cache[filename]
-    except KeyError:
-        _cache[filename] = result = normalize_path(filename)
-        return result
-
-
-def _is_egg_path(path):
-    """
-    Determine if given path appears to be an egg.
-    """
-    return _is_zip_egg(path) or _is_unpacked_egg(path)
-
-
-def _is_zip_egg(path):
-    return (
-        path.lower().endswith('.egg') and
-        os.path.isfile(path) and
-        zipfile.is_zipfile(path)
-    )
-
-
-def _is_unpacked_egg(path):
-    """
-    Determine if given path appears to be an unpacked egg.
-    """
-    return (
-        path.lower().endswith('.egg') and
-        os.path.isfile(os.path.join(path, 'EGG-INFO', 'PKG-INFO'))
-    )
-
-
-def _set_parent_ns(packageName):
-    parts = packageName.split('.')
-    name = parts.pop()
-    if parts:
-        parent = '.'.join(parts)
-        setattr(sys.modules[parent], name, sys.modules[packageName])
-
-
-MODULE = re.compile(r"\w+(\.\w+)*$").match
-EGG_NAME = re.compile(
-    r"""
-    (?P<name>[^-]+) (
-        -(?P<ver>[^-]+) (
-            -py(?P<pyver>[^-]+) (
-                -(?P<plat>.+)
-            )?
-        )?
-    )?
-    """,
-    re.VERBOSE | re.IGNORECASE,
-).match
-
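-# Illustrative example: EGG_NAME decomposes an egg basename into project
-# name, version, python tag, and platform.
-#
-#     >>> EGG_NAME('Example-1.0-py3.10-linux_x86_64').group(
-#     ...     'name', 'ver', 'pyver', 'plat')
-#     ('Example', '1.0', '3.10', 'linux_x86_64')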
-
-class EntryPoint:
-    """Object representing an advertised importable object"""
-
-    def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
-        if not MODULE(module_name):
-            raise ValueError("Invalid module name", module_name)
-        self.name = name
-        self.module_name = module_name
-        self.attrs = tuple(attrs)
-        self.extras = tuple(extras)
-        self.dist = dist
-
-    def __str__(self):
-        s = "%s = %s" % (self.name, self.module_name)
-        if self.attrs:
-            s += ':' + '.'.join(self.attrs)
-        if self.extras:
-            s += ' [%s]' % ','.join(self.extras)
-        return s
-
-    def __repr__(self):
-        return "EntryPoint.parse(%r)" % str(self)
-
-    def load(self, require=True, *args, **kwargs):
-        """
-        Require packages for this EntryPoint, then resolve it.
-        """
-        if not require or args or kwargs:
-            warnings.warn(
-                "Parameters to load are deprecated.  Call .resolve and "
-                ".require separately.",
-                PkgResourcesDeprecationWarning,
-                stacklevel=2,
-            )
-        if require:
-            self.require(*args, **kwargs)
-        return self.resolve()
-
-    def resolve(self):
-        """
-        Resolve the entry point from its module and attrs.
-        """
-        module = __import__(self.module_name, fromlist=['__name__'], level=0)
-        try:
-            return functools.reduce(getattr, self.attrs, module)
-        except AttributeError as exc:
-            raise ImportError(str(exc)) from exc
-
-    def require(self, env=None, installer=None):
-        if self.extras and not self.dist:
-            raise UnknownExtra("Can't require() without a distribution", self)
-
-        # Get the requirements for this entry point with all its extras and
-        # then resolve them. We have to pass `extras` along when resolving so
-        # that the working set knows what extras we want. Otherwise, for
-        # dist-info distributions, the working set will assume that the
-        # requirements for that extra are purely optional and skip over them.
-        reqs = self.dist.requires(self.extras)
-        items = working_set.resolve(reqs, env, installer, extras=self.extras)
-        list(map(working_set.add, items))
-
-    pattern = re.compile(
-        r'\s*'
-        r'(?P<name>.+?)\s*'
-        r'=\s*'
-        r'(?P<module>[\w.]+)\s*'
-        r'(:\s*(?P<attr>[\w.]+))?\s*'
-        r'(?P<extras>\[.*\])?\s*$'
-    )
-
-    @classmethod
-    def parse(cls, src, dist=None):
-        """Parse a single entry point from string `src`
-
-        Entry point syntax follows the form::
-
-            name = some.module:some.attr [extra1, extra2]
-
-        The entry name and module name are required, but the ``:attrs`` and
-        ``[extras]`` parts are optional.
-        """
-        m = cls.pattern.match(src)
-        if not m:
-            msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
-            raise ValueError(msg, src)
-        res = m.groupdict()
-        extras = cls._parse_extras(res['extras'])
-        attrs = res['attr'].split('.') if res['attr'] else ()
-        return cls(res['name'], res['module'], attrs, extras, dist)
-
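-    # Illustrative example of the syntax documented above:
-    #
-    #     >>> ep = EntryPoint.parse(
-    #     ...     'mycmd = some.module:SomeClass.factory [extra1,extra2]')
-    #     >>> ep.name, ep.module_name, ep.attrs, ep.extras
-    #     ('mycmd', 'some.module', ('SomeClass', 'factory'), ('extra1', 'extra2'))
-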
-    @classmethod
-    def _parse_extras(cls, extras_spec):
-        if not extras_spec:
-            return ()
-        req = Requirement.parse('x' + extras_spec)
-        if req.specs:
-            raise ValueError()
-        return req.extras
-
-    @classmethod
-    def parse_group(cls, group, lines, dist=None):
-        """Parse an entry point group"""
-        if not MODULE(group):
-            raise ValueError("Invalid group name", group)
-        this = {}
-        for line in yield_lines(lines):
-            ep = cls.parse(line, dist)
-            if ep.name in this:
-                raise ValueError("Duplicate entry point", group, ep.name)
-            this[ep.name] = ep
-        return this
-
-    @classmethod
-    def parse_map(cls, data, dist=None):
-        """Parse a map of entry point groups"""
-        if isinstance(data, dict):
-            data = data.items()
-        else:
-            data = split_sections(data)
-        maps = {}
-        for group, lines in data:
-            if group is None:
-                if not lines:
-                    continue
-                raise ValueError("Entry points must be listed in groups")
-            group = group.strip()
-            if group in maps:
-                raise ValueError("Duplicate group name", group)
-            maps[group] = cls.parse_group(group, lines, dist)
-        return maps
-
-
-def _version_from_file(lines):
-    """
-    Given an iterable of lines from a Metadata file, return
-    the value of the Version field, if present, or None otherwise.
-    """
-    def is_version_line(line):
-        return line.lower().startswith('version:')
-    version_lines = filter(is_version_line, lines)
-    line = next(iter(version_lines), '')
-    _, _, value = line.partition(':')
-    return safe_version(value.strip()) or None
-
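-# Illustrative examples:
-#
-#     >>> _version_from_file(['Name: example', 'Version: 1.2.3'])
-#     '1.2.3'
-#     >>> _version_from_file(['Name: example']) is None
-#     True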
-
-class Distribution:
-    """Wrap an actual or potential sys.path entry w/metadata"""
-    PKG_INFO = 'PKG-INFO'
-
-    def __init__(
-            self, location=None, metadata=None, project_name=None,
-            version=None, py_version=PY_MAJOR, platform=None,
-            precedence=EGG_DIST):
-        self.project_name = safe_name(project_name or 'Unknown')
-        if version is not None:
-            self._version = safe_version(version)
-        self.py_version = py_version
-        self.platform = platform
-        self.location = location
-        self.precedence = precedence
-        self._provider = metadata or empty_provider
-
-    @classmethod
-    def from_location(cls, location, basename, metadata=None, **kw):
-        project_name, version, py_version, platform = [None] * 4
-        basename, ext = os.path.splitext(basename)
-        if ext.lower() in _distributionImpl:
-            cls = _distributionImpl[ext.lower()]
-
-            match = EGG_NAME(basename)
-            if match:
-                project_name, version, py_version, platform = match.group(
-                    'name', 'ver', 'pyver', 'plat'
-                )
-        return cls(
-            location, metadata, project_name=project_name, version=version,
-            py_version=py_version, platform=platform, **kw
-        )._reload_version()
-
-    def _reload_version(self):
-        return self
-
-    @property
-    def hashcmp(self):
-        return (
-            self.parsed_version,
-            self.precedence,
-            self.key,
-            self.location,
-            self.py_version or '',
-            self.platform or '',
-        )
-
-    def __hash__(self):
-        return hash(self.hashcmp)
-
-    def __lt__(self, other):
-        return self.hashcmp < other.hashcmp
-
-    def __le__(self, other):
-        return self.hashcmp <= other.hashcmp
-
-    def __gt__(self, other):
-        return self.hashcmp > other.hashcmp
-
-    def __ge__(self, other):
-        return self.hashcmp >= other.hashcmp
-
-    def __eq__(self, other):
-        if not isinstance(other, self.__class__):
-            # It's not a Distribution, so they are not equal
-            return False
-        return self.hashcmp == other.hashcmp
-
-    def __ne__(self, other):
-        return not self == other
-
-    # These properties have to be lazy so that we don't have to load any
-    # metadata until/unless it's actually needed.  (i.e., some distributions
-    # may not know their name or version without loading PKG-INFO)
-
-    @property
-    def key(self):
-        try:
-            return self._key
-        except AttributeError:
-            self._key = key = self.project_name.lower()
-            return key
-
-    @property
-    def parsed_version(self):
-        if not hasattr(self, "_parsed_version"):
-            self._parsed_version = parse_version(self.version)
-
-        return self._parsed_version
-
-    def _warn_legacy_version(self):
-        LV = packaging.version.LegacyVersion
-        is_legacy = isinstance(self._parsed_version, LV)
-        if not is_legacy:
-            return
-
-        # While an empty version is technically a legacy version and
-        # is not a valid PEP 440 version, it's also unlikely to
-        # actually come from someone and instead it is more likely that
-        # it comes from setuptools attempting to parse a filename and
-        # including it in the list. So for that we'll gate this warning
-        # on if the version is anything at all or not.
-        if not self.version:
-            return
-
-        tmpl = textwrap.dedent("""
-            '{project_name} ({version})' is being parsed as a legacy,
-            non-PEP 440 version. You may find odd behavior and sort
-            order. In particular it will be sorted as less than 0.0.
-            It is recommended to migrate to PEP 440 compatible
-            versions.
-            """).strip().replace('\n', ' ')
-
-        warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
-
-    @property
-    def version(self):
-        try:
-            return self._version
-        except AttributeError as e:
-            version = self._get_version()
-            if version is None:
-                path = self._get_metadata_path_for_display(self.PKG_INFO)
-                msg = (
-                    "Missing 'Version:' header and/or {} file at path: {}"
-                ).format(self.PKG_INFO, path)
-                raise ValueError(msg, self) from e
-
-            return version
-
-    @property
-    def _dep_map(self):
-        """
-        A map of extra to its list of (direct) requirements
-        for this distribution, including the null extra.
-        """
-        try:
-            return self.__dep_map
-        except AttributeError:
-            self.__dep_map = self._filter_extras(self._build_dep_map())
-        return self.__dep_map
-
-    @staticmethod
-    def _filter_extras(dm):
-        """
-        Given a mapping of extras to dependencies, strip off
-        environment markers and filter out any dependencies
-        not matching the markers.
-        """
-        for extra in list(filter(None, dm)):
-            reqs = dm.pop(extra)
-            new_extra, _, marker = extra.partition(':')
-            fails_marker = marker and (
-                invalid_marker(marker)
-                or not evaluate_marker(marker)
-            )
-            if fails_marker:
-                reqs = []
-            new_extra = safe_extra(new_extra) or None
-
-            dm.setdefault(new_extra, []).extend(reqs)
-        return dm
-
-    def _build_dep_map(self):
-        dm = {}
-        for name in 'requires.txt', 'depends.txt':
-            for extra, reqs in split_sections(self._get_metadata(name)):
-                dm.setdefault(extra, []).extend(parse_requirements(reqs))
-        return dm
-
-    def requires(self, extras=()):
-        """List of Requirements needed for this distro if `extras` are used"""
-        dm = self._dep_map
-        deps = []
-        deps.extend(dm.get(None, ()))
-        for ext in extras:
-            try:
-                deps.extend(dm[safe_extra(ext)])
-            except KeyError as e:
-                raise UnknownExtra(
-                    "%s has no such extra feature %r" % (self, ext)
-                ) from e
-        return deps
-
-    def _get_metadata_path_for_display(self, name):
-        """
-        Return the path to the given metadata file, if available.
-        """
-        try:
-            # We need to access _get_metadata_path() on the provider object
-            # directly rather than through this class's __getattr__()
-            # since _get_metadata_path() is marked private.
-            path = self._provider._get_metadata_path(name)
-
-        # Handle exceptions e.g. in case the distribution's metadata
-        # provider doesn't support _get_metadata_path().
-        except Exception:
-            return '[could not detect]'
-
-        return path
-
-    def _get_metadata(self, name):
-        if self.has_metadata(name):
-            for line in self.get_metadata_lines(name):
-                yield line
-
-    def _get_version(self):
-        lines = self._get_metadata(self.PKG_INFO)
-        version = _version_from_file(lines)
-
-        return version
-
-    def activate(self, path=None, replace=False):
-        """Ensure distribution is importable on `path` (default=sys.path)"""
-        if path is None:
-            path = sys.path
-        self.insert_on(path, replace=replace)
-        if path is sys.path:
-            fixup_namespace_packages(self.location)
-            for pkg in self._get_metadata('namespace_packages.txt'):
-                if pkg in sys.modules:
-                    declare_namespace(pkg)
-
-    def egg_name(self):
-        """Return what this distribution's standard .egg filename should be"""
-        filename = "%s-%s-py%s" % (
-            to_filename(self.project_name), to_filename(self.version),
-            self.py_version or PY_MAJOR
-        )
-
-        if self.platform:
-            filename += '-' + self.platform
-        return filename
-
-    def __repr__(self):
-        if self.location:
-            return "%s (%s)" % (self, self.location)
-        else:
-            return str(self)
-
-    def __str__(self):
-        try:
-            version = getattr(self, 'version', None)
-        except ValueError:
-            version = None
-        version = version or "[unknown version]"
-        return "%s %s" % (self.project_name, version)
-
-    def __getattr__(self, attr):
-        """Delegate all unrecognized public attributes to .metadata provider"""
-        if attr.startswith('_'):
-            raise AttributeError(attr)
-        return getattr(self._provider, attr)
-
-    def __dir__(self):
-        return list(
-            set(super(Distribution, self).__dir__())
-            | set(
-                attr for attr in self._provider.__dir__()
-                if not attr.startswith('_')
-            )
-        )
-
-    @classmethod
-    def from_filename(cls, filename, metadata=None, **kw):
-        return cls.from_location(
-            _normalize_cached(filename), os.path.basename(filename), metadata,
-            **kw
-        )
-
-    def as_requirement(self):
-        """Return a ``Requirement`` that matches this distribution exactly"""
-        if isinstance(self.parsed_version, packaging.version.Version):
-            spec = "%s==%s" % (self.project_name, self.parsed_version)
-        else:
-            spec = "%s===%s" % (self.project_name, self.parsed_version)
-
-        return Requirement.parse(spec)
-
-    def load_entry_point(self, group, name):
-        """Return the `name` entry point of `group` or raise ImportError"""
-        ep = self.get_entry_info(group, name)
-        if ep is None:
-            raise ImportError("Entry point %r not found" % ((group, name),))
-        return ep.load()
-
-    def get_entry_map(self, group=None):
-        """Return the entry point map for `group`, or the full entry map"""
-        try:
-            ep_map = self._ep_map
-        except AttributeError:
-            ep_map = self._ep_map = EntryPoint.parse_map(
-                self._get_metadata('entry_points.txt'), self
-            )
-        if group is not None:
-            return ep_map.get(group, {})
-        return ep_map
-
-    def get_entry_info(self, group, name):
-        """Return the EntryPoint object for `group`+`name`, or ``None``"""
-        return self.get_entry_map(group).get(name)
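-
-    # Editor's note: a hedged usage sketch (hypothetical names, not in the
-    # original source) tying the three entry-point accessors together:
-    #
-    #     >>> dist = get_distribution('somepkg')
-    #     >>> dist.get_entry_map('console_scripts')
-    #     {'somecmd': EntryPoint.parse('somecmd = somepkg.cli:main')}
-    #     >>> ep = dist.get_entry_info('console_scripts', 'somecmd')
-    #     >>> main = dist.load_entry_point('console_scripts', 'somecmd')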
-
-    # FIXME: 'Distribution.insert_on' is too complex (13)
-    def insert_on(self, path, loc=None, replace=False):  # noqa: C901
-        """Ensure self.location is on path
-
-        If replace=False (default):
-            - If location is already in path anywhere, do nothing.
-            - Else:
-              - If it's an egg and its parent directory is on path,
-                insert just ahead of the parent.
-              - Else: add to the end of path.
-        If replace=True:
-            - If location is already on path anywhere (not eggs)
-              or higher priority than its parent (eggs)
-              do nothing.
-            - Else:
-              - If it's an egg and its parent directory is on path,
-                insert just ahead of the parent,
-                removing any lower-priority entries.
-              - Else: add it to the front of path.
-        """
-
-        loc = loc or self.location
-        if not loc:
-            return
-
-        nloc = _normalize_cached(loc)
-        bdir = os.path.dirname(nloc)
-        npath = [(_normalize_cached(p) if p else p) for p in path]
-
-        for p, item in enumerate(npath):
-            if item == nloc:
-                if replace:
-                    break
-                else:
-                    # don't modify path (even removing duplicates) if
-                    # found and not replace
-                    return
-            elif item == bdir and self.precedence == EGG_DIST:
-                # if it's an .egg, give it precedence over its directory
-                # UNLESS it's already been added to sys.path and replace=False
-                if (not replace) and nloc in npath[p:]:
-                    return
-                if path is sys.path:
-                    self.check_version_conflict()
-                path.insert(p, loc)
-                npath.insert(p, nloc)
-                break
-        else:
-            if path is sys.path:
-                self.check_version_conflict()
-            if replace:
-                path.insert(0, loc)
-            else:
-                path.append(loc)
-            return
-
-        # p is the spot where we found or inserted loc; now remove duplicates
-        while True:
-            try:
-                np = npath.index(nloc, p + 1)
-            except ValueError:
-                break
-            else:
-                del npath[np], path[np]
-                # continue scanning from the point of removal
-                p = np
-
-        return
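-
-    # Editor's note: a hedged sketch (hypothetical paths) of the rules
-    # documented above -- an egg gets inserted just ahead of its parent
-    # directory on the path:
-    #
-    #     >>> d = Distribution(location='/plugins/foo-1.0.egg',
-    #     ...                  project_name='foo', version='1.0',
-    #     ...                  precedence=EGG_DIST)
-    #     >>> path = ['/stdlib', '/plugins']
-    #     >>> d.insert_on(path)
-    #     >>> path
-    #     ['/stdlib', '/plugins/foo-1.0.egg', '/plugins']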
-
-    def check_version_conflict(self):
-        if self.key == 'setuptools':
-            # ignore the inevitable setuptools self-conflicts  :(
-            return
-
-        nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
-        loc = normalize_path(self.location)
-        for modname in self._get_metadata('top_level.txt'):
-            if (modname not in sys.modules or modname in nsp
-                    or modname in _namespace_packages):
-                continue
-            if modname in ('pkg_resources', 'setuptools', 'site'):
-                continue
-            fn = getattr(sys.modules[modname], '__file__', None)
-            if fn and (normalize_path(fn).startswith(loc) or
-                       fn.startswith(self.location)):
-                continue
-            issue_warning(
-                "Module %s was already imported from %s, but %s is being added"
-                " to sys.path" % (modname, fn, self.location),
-            )
-
-    def has_version(self):
-        try:
-            self.version
-        except ValueError:
-            issue_warning("Unbuilt egg for " + repr(self))
-            return False
-        return True
-
-    def clone(self, **kw):
-        """Copy this distribution, substituting in any changed keyword args"""
-        names = 'project_name version py_version platform location precedence'
-        for attr in names.split():
-            kw.setdefault(attr, getattr(self, attr, None))
-        kw.setdefault('metadata', self._provider)
-        return self.__class__(**kw)
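-
-    # Editor's note: a hedged sketch (hypothetical values). Only the
-    # keywords supplied to clone() are overridden; everything else is
-    # copied from the original distribution:
-    #
-    #     >>> newer = dist.clone(version='2.0')
-    #     >>> newer.project_name == dist.project_name
-    #     True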
-
-    @property
-    def extras(self):
-        return [dep for dep in self._dep_map if dep]
-
-
-class EggInfoDistribution(Distribution):
-    def _reload_version(self):
-        """
-        Packages installed by distutils (e.g. numpy or scipy)
-        use an older safe_version, so their version numbers
-        can get mangled when converted to filenames (e.g.
-        1.11.0.dev0+2329eae becomes 1.11.0.dev0_2329eae).
-        Such distributions will not be parsed properly
-        downstream by Distribution and safe_version, so take
-        an extra step and try to get the version number from
-        the metadata file itself instead of the filename.
-        """
-        md_version = self._get_version()
-        if md_version:
-            self._version = md_version
-        return self
-
-
-class DistInfoDistribution(Distribution):
-    """
-    Wrap an actual or potential sys.path entry
-    with metadata, .dist-info style.
-    """
-    PKG_INFO = 'METADATA'
-    EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
-
-    @property
-    def _parsed_pkg_info(self):
-        """Parse and cache metadata"""
-        try:
-            return self._pkg_info
-        except AttributeError:
-            metadata = self.get_metadata(self.PKG_INFO)
-            self._pkg_info = email.parser.Parser().parsestr(metadata)
-            return self._pkg_info
-
-    @property
-    def _dep_map(self):
-        try:
-            return self.__dep_map
-        except AttributeError:
-            self.__dep_map = self._compute_dependencies()
-            return self.__dep_map
-
-    def _compute_dependencies(self):
-        """Recompute this distribution's dependencies."""
-        dm = self.__dep_map = {None: []}
-
-        reqs = []
-        # Including any condition expressions
-        for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
-            reqs.extend(parse_requirements(req))
-
-        def reqs_for_extra(extra):
-            for req in reqs:
-                if not req.marker or req.marker.evaluate({'extra': extra}):
-                    yield req
-
-        common = types.MappingProxyType(dict.fromkeys(reqs_for_extra(None)))
-        dm[None].extend(common)
-
-        for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
-            s_extra = safe_extra(extra.strip())
-            dm[s_extra] = [r for r in reqs_for_extra(extra) if r not in common]
-
-        return dm
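-
-    # Editor's note: a hedged sketch of the resulting shape (hypothetical
-    # metadata). Given 'Requires-Dist: foo', 'Requires-Dist: bar; extra ==
-    # "test"' and 'Provides-Extra: test', the computed map looks like
-    #
-    #     {None: [Requirement('foo')], 'test': [Requirement('bar')]}
-    #
-    # i.e. unconditional requirements under the ``None`` key and per-extra
-    # requirements keyed by their safe_extra() name, with anything already
-    # in the common set filtered out.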
-
-
-_distributionImpl = {
-    '.egg': Distribution,
-    '.egg-info': EggInfoDistribution,
-    '.dist-info': DistInfoDistribution,
-}
-
-
-def issue_warning(*args, **kw):
-    level = 1
-    g = globals()
-    try:
-        # find the first stack frame that is *not* code in
-        # the pkg_resources module, to use for the warning
-        while sys._getframe(level).f_globals is g:
-            level += 1
-    except ValueError:
-        pass
-    warnings.warn(stacklevel=level + 1, *args, **kw)
-
-
-def parse_requirements(strs):
-    """
-    Yield ``Requirement`` objects for each specification in `strs`.
-
-    `strs` must be a string, or a (possibly-nested) iterable thereof.
-    """
-    return map(Requirement, join_continuation(map(drop_comment, yield_lines(strs))))
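-
-# Editor's note: a hedged sketch (hypothetical input). Comments are
-# stripped and backslash continuations joined before each remaining line
-# becomes a Requirement:
-#
-#     >>> for req in parse_requirements("foo>=1.0  # pinned\nbar[fast]==2.*"):
-#     ...     print(req.project_name, req.specs)
-#     foo [('>=', '1.0')]
-#     bar [('==', '2.*')]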
-
-
-class RequirementParseError(packaging.requirements.InvalidRequirement):
-    "Compatibility wrapper for InvalidRequirement"
-
-
-class Requirement(packaging.requirements.Requirement):
-    def __init__(self, requirement_string):
-        """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
-        super(Requirement, self).__init__(requirement_string)
-        self.unsafe_name = self.name
-        project_name = safe_name(self.name)
-        self.project_name, self.key = project_name, project_name.lower()
-        self.specs = [
-            (spec.operator, spec.version) for spec in self.specifier]
-        self.extras = tuple(map(safe_extra, self.extras))
-        self.hashCmp = (
-            self.key,
-            self.url,
-            self.specifier,
-            frozenset(self.extras),
-            str(self.marker) if self.marker else None,
-        )
-        self.__hash = hash(self.hashCmp)
-
-    def __eq__(self, other):
-        return (
-            isinstance(other, Requirement) and
-            self.hashCmp == other.hashCmp
-        )
-
-    def __ne__(self, other):
-        return not self == other
-
-    def __contains__(self, item):
-        if isinstance(item, Distribution):
-            if item.key != self.key:
-                return False
-
-            item = item.version
-
-        # Allow prereleases always in order to match the previous behavior of
-        # this method. In the future this should be smarter and follow PEP 440
-        # more accurately.
-        return self.specifier.contains(item, prereleases=True)
-
-    def __hash__(self):
-        return self.__hash
-
-    def __repr__(self):
-        return "Requirement.parse(%r)" % str(self)
-
-    @staticmethod
-    def parse(s):
-        req, = parse_requirements(s)
-        return req
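-
-    # Editor's note: a hedged sketch (hypothetical values). Membership
-    # testing accepts either a version string or a Distribution:
-    #
-    #     >>> req = Requirement.parse('foo>=1.0,<2.0')
-    #     >>> '1.5' in req
-    #     True
-    #     >>> '2.1' in req
-    #     False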
-
-
-def _always_object(classes):
-    """
-    Ensure object appears in the mro even
-    for old-style classes.
-    """
-    if object not in classes:
-        return classes + (object,)
-    return classes
-
-
-def _find_adapter(registry, ob):
-    """Return an adapter factory for `ob` from `registry`"""
-    types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob))))
-    for t in types:
-        if t in registry:
-            return registry[t]
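-
-# Editor's note: a hedged sketch (hypothetical registry). The first MRO
-# entry with a registered factory wins, so subclasses fall back to their
-# base classes:
-#
-#     >>> registry = {object: 'default', dict: 'mapping'}
-#     >>> class MyDict(dict): pass
-#     >>> _find_adapter(registry, MyDict())
-#     'mapping'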
-
-
-def ensure_directory(path):
-    """Ensure that the parent directory of `path` exists"""
-    dirname = os.path.dirname(path)
-    os.makedirs(dirname, exist_ok=True)
-
-
-def _bypass_ensure_directory(path):
-    """Sandbox-bypassing version of ensure_directory()"""
-    if not WRITE_SUPPORT:
-        raise IOError('"os.mkdir" not supported on this platform.')
-    dirname, filename = split(path)
-    if dirname and filename and not isdir(dirname):
-        _bypass_ensure_directory(dirname)
-        try:
-            mkdir(dirname, 0o755)
-        except FileExistsError:
-            pass
-
-
-def split_sections(s):
-    """Split a string or iterable thereof into (section, content) pairs
-
-    Each ``section`` is a stripped version of the section header ("[section]")
-    and each ``content`` is a list of stripped lines excluding blank lines and
-    comment-only lines.  If there are any such lines before the first section
-    header, they're returned in a first ``section`` of ``None``.
-    """
-    section = None
-    content = []
-    for line in yield_lines(s):
-        if line.startswith("["):
-            if line.endswith("]"):
-                if section or content:
-                    yield section, content
-                section = line[1:-1].strip()
-                content = []
-            else:
-                raise ValueError("Invalid section heading", line)
-        else:
-            content.append(line)
-
-    # wrap up last segment
-    yield section, content
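-
-# Editor's note: a hedged sketch (hypothetical data), parsing
-# entry_points.txt-style input:
-#
-#     >>> text = "top\n[console_scripts]\nfoo = foo.cli:main\n"
-#     >>> list(split_sections(text))
-#     [(None, ['top']), ('console_scripts', ['foo = foo.cli:main'])]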
-
-
-def _mkstemp(*args, **kw):
-    old_open = os.open
-    try:
-        # temporarily bypass sandboxing
-        os.open = os_open
-        return tempfile.mkstemp(*args, **kw)
-    finally:
-        # and then put it back
-        os.open = old_open
-
-
-# Silence the PEP440Warning by default, so that end users don't get hit by it
-# randomly just because they use pkg_resources. We want to append the rule
-# because we want earlier uses of filterwarnings to take precedence over this
-# one.
-warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
-
-
-# from jaraco.functools 1.3
-def _call_aside(f, *args, **kwargs):
-    f(*args, **kwargs)
-    return f
-
-
-@_call_aside
-def _initialize(g=globals()):
-    "Set up global resource manager (deliberately not state-saved)"
-    manager = ResourceManager()
-    g['_manager'] = manager
-    g.update(
-        (name, getattr(manager, name))
-        for name in dir(manager)
-        if not name.startswith('_')
-    )
-
-
-class PkgResourcesDeprecationWarning(Warning):
-    """
-    Base class for warning about deprecations in ``pkg_resources``
-
-    This class is not derived from ``DeprecationWarning``, and as such is
-    visible by default.
-    """
-
-
-@_call_aside
-def _initialize_master_working_set():
-    """
-    Prepare the master working set and make the ``require()``
-    API available.
-
-    This function has explicit effects on the global state
-    of pkg_resources. It is intended to be invoked once at
-    the initialization of this module.
-
-    Invocation by other packages is unsupported and done
-    at their own risk.
-    """
-    working_set = WorkingSet._build_master()
-    _declare_state('object', working_set=working_set)
-
-    require = working_set.require
-    iter_entry_points = working_set.iter_entry_points
-    add_activation_listener = working_set.subscribe
-    run_script = working_set.run_script
-    # backward compatibility
-    run_main = run_script
-    # Activate all distributions already on sys.path with replace=False and
-    # ensure that all distributions added to the working set in the future
-    # (e.g. by calling ``require()``) will get activated as well,
-    # with higher priority (replace=True).
-    tuple(
-        dist.activate(replace=False)
-        for dist in working_set
-    )
-    add_activation_listener(
-        lambda dist: dist.activate(replace=True),
-        existing=False,
-    )
-    working_set.entries = []
-    # re-populate entries to match the order of sys.path
-    list(map(working_set.add_entry, sys.path))
-    globals().update(locals())
diff --git a/env/lib/python3.10/site-packages/pkg_resources/__pycache__/__init__.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/__pycache__/__init__.cpython-310.pyc
deleted file mode 100644
index c32783c..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/__pycache__/__init__.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/__init__.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/__pycache__/__init__.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/__pycache__/__init__.cpython-310.pyc
deleted file mode 100644
index a211159..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/__pycache__/__init__.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/__pycache__/appdirs.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/__pycache__/appdirs.cpython-310.pyc
deleted file mode 100644
index 3a4173f..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/__pycache__/appdirs.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/__pycache__/zipp.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/__pycache__/zipp.cpython-310.pyc
deleted file mode 100644
index 4d2e691..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/__pycache__/zipp.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/appdirs.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/appdirs.py
deleted file mode 100644
index ae67001..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/appdirs.py
+++ /dev/null
@@ -1,608 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# Copyright (c) 2005-2010 ActiveState Software Inc.
-# Copyright (c) 2013 Eddy Petrișor
-
-"""Utilities for determining application-specific dirs.
-
-See <http://github.com/ActiveState/appdirs> for details and usage.
-"""
-# Dev Notes:
-# - MSDN on where to store app data files:
-#   http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
-# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
-# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
-
-__version_info__ = (1, 4, 3)
-__version__ = '.'.join(map(str, __version_info__))
-
-
-import sys
-import os
-
-PY3 = sys.version_info[0] == 3
-
-if PY3:
-    unicode = str
-
-if sys.platform.startswith('java'):
-    import platform
-    os_name = platform.java_ver()[3][0]
-    if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
-        system = 'win32'
-    elif os_name.startswith('Mac'): # "Mac OS X", etc.
-        system = 'darwin'
-    else: # "Linux", "SunOS", "FreeBSD", etc.
-        # Setting this to "linux2" is not ideal, but only Windows or Mac
-        # are actually checked for and the rest of the module expects
-        # *sys.platform* style strings.
-        system = 'linux2'
-else:
-    system = sys.platform
-
-
-
-def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
-    r"""Return full path to the user-specific data dir for this application.
-
-        "appname" is the name of application.
-            If None, just the system directory is returned.
-        "appauthor" (only used on Windows) is the name of the
-            appauthor or distributing body for this application. Typically
-            it is the owning company name. This falls back to appname. You may
-            pass False to disable it.
-        "version" is an optional version path element to append to the
-            path. You might want to use this if you want multiple versions
-            of your app to be able to run independently. If used, this
-            would typically be "<major>.<minor>".
-            Only applied when appname is present.
-        "roaming" (boolean, default False) can be set True to use the Windows
-            roaming appdata directory. That means that for users on a Windows
-            network setup for roaming profiles, this user data will be
-            sync'd on login. See
-            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
-            for a discussion of issues.
-
-    Typical user data directories are:
-        Mac OS X:               ~/Library/Application Support/<AppName>
-        Unix:                   ~/.local/share/<AppName>    # or in $XDG_DATA_HOME, if defined
-        Win XP (not roaming):   C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
-        Win XP (roaming):       C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
-        Win 7  (not roaming):   C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
-        Win 7  (roaming):       C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
-
-    For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
-    That means, by default "~/.local/share/<AppName>".
-    """
-    if system == "win32":
-        if appauthor is None:
-            appauthor = appname
-        const = "CSIDL_APPDATA" if roaming else "CSIDL_LOCAL_APPDATA"
-        path = os.path.normpath(_get_win_folder(const))
-        if appname:
-            if appauthor is not False:
-                path = os.path.join(path, appauthor, appname)
-            else:
-                path = os.path.join(path, appname)
-    elif system == 'darwin':
-        path = os.path.expanduser('~/Library/Application Support/')
-        if appname:
-            path = os.path.join(path, appname)
-    else:
-        path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
-        if appname:
-            path = os.path.join(path, appname)
-    if appname and version:
-        path = os.path.join(path, version)
-    return path
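-
-# Editor's note: a hedged sketch; results are illustrative only and depend
-# on the platform and environment:
-#
-#     >>> user_data_dir("MyApp", "MyCompany", version="1.0")
-#     '/home/me/.local/share/MyApp/1.0'                  # Linux, XDG unset
-#     'C:\\Users\\me\\AppData\\Local\\MyCompany\\MyApp\\1.0'   # Windows 7+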
-
-
-def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
-    r"""Return full path to the user-shared data dir for this application.
-
-        "appname" is the name of application.
-            If None, just the system directory is returned.
-        "appauthor" (only used on Windows) is the name of the
-            appauthor or distributing body for this application. Typically
-            it is the owning company name. This falls back to appname. You may
-            pass False to disable it.
-        "version" is an optional version path element to append to the
-            path. You might want to use this if you want multiple versions
-            of your app to be able to run independently. If used, this
-            would typically be "<major>.<minor>".
-            Only applied when appname is present.
-        "multipath" is an optional parameter only applicable to *nix
-            which indicates that the entire list of data dirs should be
-            returned. By default, the first item from XDG_DATA_DIRS is
-            returned, or '/usr/local/share/<AppName>',
-            if XDG_DATA_DIRS is not set
-
-    Typical site data directories are:
-        Mac OS X:   /Library/Application Support/<AppName>
-        Unix:       /usr/local/share/<AppName> or /usr/share/<AppName>
-        Win XP:     C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
-        Vista:      (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
-        Win 7:      C:\ProgramData\<AppAuthor>\<AppName>   # Hidden, but writeable on Win 7.
-
-    For Unix, this is using the $XDG_DATA_DIRS[0] default.
-
-    WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
-    """
-    if system == "win32":
-        if appauthor is None:
-            appauthor = appname
-        path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
-        if appname:
-            if appauthor is not False:
-                path = os.path.join(path, appauthor, appname)
-            else:
-                path = os.path.join(path, appname)
-    elif system == 'darwin':
-        path = os.path.expanduser('/Library/Application Support')
-        if appname:
-            path = os.path.join(path, appname)
-    else:
-        # XDG default for $XDG_DATA_DIRS
-        # only first, if multipath is False
-        path = os.getenv('XDG_DATA_DIRS',
-                         os.pathsep.join(['/usr/local/share', '/usr/share']))
-        pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
-        if appname:
-            if version:
-                appname = os.path.join(appname, version)
-            pathlist = [os.sep.join([x, appname]) for x in pathlist]
-
-        if multipath:
-            path = os.pathsep.join(pathlist)
-        else:
-            path = pathlist[0]
-        return path
-
-    if appname and version:
-        path = os.path.join(path, version)
-    return path
-
-
-def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
-    r"""Return full path to the user-specific config dir for this application.
-
-        "appname" is the name of application.
-            If None, just the system directory is returned.
-        "appauthor" (only used on Windows) is the name of the
-            appauthor or distributing body for this application. Typically
-            it is the owning company name. This falls back to appname. You may
-            pass False to disable it.
-        "version" is an optional version path element to append to the
-            path. You might want to use this if you want multiple versions
-            of your app to be able to run independently. If used, this
-            would typically be "<major>.<minor>".
-            Only applied when appname is present.
-        "roaming" (boolean, default False) can be set True to use the Windows
-            roaming appdata directory. That means that for users on a Windows
-            network setup for roaming profiles, this user data will be
-            sync'd on login. See
-            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
-            for a discussion of issues.
-
-    Typical user config directories are:
-        Mac OS X:               same as user_data_dir
-        Unix:                   ~/.config/<AppName>     # or in $XDG_CONFIG_HOME, if defined
-        Win *:                  same as user_data_dir
-
-    For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
-    That means, by default "~/.config/<AppName>".
-    """
-    if system in ["win32", "darwin"]:
-        path = user_data_dir(appname, appauthor, None, roaming)
-    else:
-        path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
-        if appname:
-            path = os.path.join(path, appname)
-    if appname and version:
-        path = os.path.join(path, version)
-    return path
-
-
-def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
-    r"""Return full path to the user-shared data dir for this application.
-
-        "appname" is the name of application.
-            If None, just the system directory is returned.
-        "appauthor" (only used on Windows) is the name of the
-            appauthor or distributing body for this application. Typically
-            it is the owning company name. This falls back to appname. You may
-            pass False to disable it.
-        "version" is an optional version path element to append to the
-            path. You might want to use this if you want multiple versions
-            of your app to be able to run independently. If used, this
-            would typically be "<major>.<minor>".
-            Only applied when appname is present.
-        "multipath" is an optional parameter only applicable to *nix
-            which indicates that the entire list of config dirs should be
-            returned. By default, the first item from XDG_CONFIG_DIRS is
-            returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
-
-    Typical site config directories are:
-        Mac OS X:   same as site_data_dir
-        Unix:       /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
-                    $XDG_CONFIG_DIRS
-        Win *:      same as site_data_dir
-        Vista:      (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
-
-    For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
-
-    WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
-    """
-    if system in ["win32", "darwin"]:
-        path = site_data_dir(appname, appauthor)
-        if appname and version:
-            path = os.path.join(path, version)
-    else:
-        # XDG default for $XDG_CONFIG_DIRS
-        # only first, if multipath is False
-        path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
-        pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
-        if appname:
-            if version:
-                appname = os.path.join(appname, version)
-            pathlist = [os.sep.join([x, appname]) for x in pathlist]
-
-        if multipath:
-            path = os.pathsep.join(pathlist)
-        else:
-            path = pathlist[0]
-    return path
-
-
-def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
-    r"""Return full path to the user-specific cache dir for this application.
-
-        "appname" is the name of application.
-            If None, just the system directory is returned.
-        "appauthor" (only used on Windows) is the name of the
-            appauthor or distributing body for this application. Typically
-            it is the owning company name. This falls back to appname. You may
-            pass False to disable it.
-        "version" is an optional version path element to append to the
-            path. You might want to use this if you want multiple versions
-            of your app to be able to run independently. If used, this
-            would typically be "<major>.<minor>".
-            Only applied when appname is present.
-        "opinion" (boolean) can be False to disable the appending of
-            "Cache" to the base app data dir for Windows. See
-            discussion below.
-
-    Typical user cache directories are:
-        Mac OS X:   ~/Library/Caches/<AppName>
-        Unix:       ~/.cache/<AppName> (XDG default)
-        Win XP:     C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
-        Vista:      C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
-
-    On Windows the only suggestion in the MSDN docs is that local settings go in
-    the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
-    app data dir (the default returned by `user_data_dir` above). Apps typically
-    put cache data somewhere *under* the given dir here. Some examples:
-        ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
-        ...\Acme\SuperApp\Cache\1.0
-    OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
-    This can be disabled with the `opinion=False` option.
-    """
-    if system == "win32":
-        if appauthor is None:
-            appauthor = appname
-        path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
-        if appname:
-            if appauthor is not False:
-                path = os.path.join(path, appauthor, appname)
-            else:
-                path = os.path.join(path, appname)
-            if opinion:
-                path = os.path.join(path, "Cache")
-    elif system == 'darwin':
-        path = os.path.expanduser('~/Library/Caches')
-        if appname:
-            path = os.path.join(path, appname)
-    else:
-        path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
-        if appname:
-            path = os.path.join(path, appname)
-    if appname and version:
-        path = os.path.join(path, version)
-    return path
-
-
-def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
-    r"""Return full path to the user-specific state dir for this application.
-
-        "appname" is the name of application.
-            If None, just the system directory is returned.
-        "appauthor" (only used on Windows) is the name of the
-            appauthor or distributing body for this application. Typically
-            it is the owning company name. This falls back to appname. You may
-            pass False to disable it.
-        "version" is an optional version path element to append to the
-            path. You might want to use this if you want multiple versions
-            of your app to be able to run independently. If used, this
-            would typically be "<major>.<minor>".
-            Only applied when appname is present.
-        "roaming" (boolean, default False) can be set True to use the Windows
-            roaming appdata directory. That means that for users on a Windows
-            network setup for roaming profiles, this user data will be
-            sync'd on login. See
-            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
-            for a discussion of issues.
-
-    Typical user state directories are:
-        Mac OS X:  same as user_data_dir
-        Unix:      ~/.local/state/<AppName>   # or in $XDG_STATE_HOME, if defined
-        Win *:     same as user_data_dir
-
-    For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
-    to extend the XDG spec and support $XDG_STATE_HOME.
-
-    That means, by default "~/.local/state/<AppName>".
-    """
-    if system in ["win32", "darwin"]:
-        path = user_data_dir(appname, appauthor, None, roaming)
-    else:
-        path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state"))
-        if appname:
-            path = os.path.join(path, appname)
-    if appname and version:
-        path = os.path.join(path, version)
-    return path
-
-
-def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
-    r"""Return full path to the user-specific log dir for this application.
-
-        "appname" is the name of application.
-            If None, just the system directory is returned.
-        "appauthor" (only used on Windows) is the name of the
-            appauthor or distributing body for this application. Typically
-            it is the owning company name. This falls back to appname. You may
-            pass False to disable it.
-        "version" is an optional version path element to append to the
-            path. You might want to use this if you want multiple versions
-            of your app to be able to run independently. If used, this
-            would typically be "<major>.<minor>".
-            Only applied when appname is present.
-        "opinion" (boolean) can be False to disable the appending of
-            "Logs" to the base app data dir for Windows, and "log" to the
-            base cache dir for Unix. See discussion below.
-
-    Typical user log directories are:
-        Mac OS X:   ~/Library/Logs/<AppName>
-        Unix:       ~/.cache/<AppName>/log  # or under $XDG_CACHE_HOME if defined
-        Win XP:     C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
-        Vista:      C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
-
-    On Windows the only suggestion in the MSDN docs is that local settings
-    go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
-    examples of what some Windows apps use for a logs dir.)
-
-    OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
-    value for Windows and appends "log" to the user cache dir for Unix.
-    This can be disabled with the `opinion=False` option.
-    """
-    if system == "darwin":
-        path = os.path.join(
-            os.path.expanduser('~/Library/Logs'),
-            appname)
-    elif system == "win32":
-        path = user_data_dir(appname, appauthor, version)
-        version = False
-        if opinion:
-            path = os.path.join(path, "Logs")
-    else:
-        path = user_cache_dir(appname, appauthor, version)
-        version = False
-        if opinion:
-            path = os.path.join(path, "log")
-    if appname and version:
-        path = os.path.join(path, version)
-    return path
-
-
-class AppDirs(object):
-    """Convenience wrapper for getting application dirs."""
-    def __init__(self, appname=None, appauthor=None, version=None,
-            roaming=False, multipath=False):
-        self.appname = appname
-        self.appauthor = appauthor
-        self.version = version
-        self.roaming = roaming
-        self.multipath = multipath
-
-    @property
-    def user_data_dir(self):
-        return user_data_dir(self.appname, self.appauthor,
-                             version=self.version, roaming=self.roaming)
-
-    @property
-    def site_data_dir(self):
-        return site_data_dir(self.appname, self.appauthor,
-                             version=self.version, multipath=self.multipath)
-
-    @property
-    def user_config_dir(self):
-        return user_config_dir(self.appname, self.appauthor,
-                               version=self.version, roaming=self.roaming)
-
-    @property
-    def site_config_dir(self):
-        return site_config_dir(self.appname, self.appauthor,
-                             version=self.version, multipath=self.multipath)
-
-    @property
-    def user_cache_dir(self):
-        return user_cache_dir(self.appname, self.appauthor,
-                              version=self.version)
-
-    @property
-    def user_state_dir(self):
-        return user_state_dir(self.appname, self.appauthor,
-                              version=self.version)
-
-    @property
-    def user_log_dir(self):
-        return user_log_dir(self.appname, self.appauthor,
-                            version=self.version)
-
-
-#---- internal support stuff
-
-def _get_win_folder_from_registry(csidl_name):
-    """This is a fallback technique at best. I'm not sure if using the
-    registry for this guarantees us the correct answer for all CSIDL_*
-    names.
-    """
-    if PY3:
-        import winreg as _winreg
-    else:
-        import _winreg
-
-    shell_folder_name = {
-        "CSIDL_APPDATA": "AppData",
-        "CSIDL_COMMON_APPDATA": "Common AppData",
-        "CSIDL_LOCAL_APPDATA": "Local AppData",
-    }[csidl_name]
-
-    key = _winreg.OpenKey(
-        _winreg.HKEY_CURRENT_USER,
-        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
-    )
-    dir, type = _winreg.QueryValueEx(key, shell_folder_name)
-    return dir
-
-
-def _get_win_folder_with_pywin32(csidl_name):
-    from win32com.shell import shellcon, shell
-    dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
-    # Try to make this a unicode path because SHGetFolderPath does
-    # not return unicode strings when there is unicode data in the
-    # path.
-    try:
-        dir = unicode(dir)
-
-        # Downgrade to short path name if have highbit chars. See
-        # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
-        has_high_char = False
-        for c in dir:
-            if ord(c) > 255:
-                has_high_char = True
-                break
-        if has_high_char:
-            try:
-                import win32api
-                dir = win32api.GetShortPathName(dir)
-            except ImportError:
-                pass
-    except UnicodeError:
-        pass
-    return dir
-
-
-def _get_win_folder_with_ctypes(csidl_name):
-    import ctypes
-
-    csidl_const = {
-        "CSIDL_APPDATA": 26,
-        "CSIDL_COMMON_APPDATA": 35,
-        "CSIDL_LOCAL_APPDATA": 28,
-    }[csidl_name]
-
-    buf = ctypes.create_unicode_buffer(1024)
-    ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
-
-    # Downgrade to short path name if have highbit chars. See
-    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
-    has_high_char = False
-    for c in buf:
-        if ord(c) > 255:
-            has_high_char = True
-            break
-    if has_high_char:
-        buf2 = ctypes.create_unicode_buffer(1024)
-        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
-            buf = buf2
-
-    return buf.value
-
-
-def _get_win_folder_with_jna(csidl_name):
-    import array
-    from com.sun import jna
-    from com.sun.jna.platform import win32
-
-    buf_size = win32.WinDef.MAX_PATH * 2
-    buf = array.zeros('c', buf_size)
-    shell = win32.Shell32.INSTANCE
-    shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None,
-                          win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
-    dir = jna.Native.toString(buf.tostring()).rstrip("\0")
-
-    # Downgrade to short path name if have highbit chars. See
-    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
-    has_high_char = False
-    for c in dir:
-        if ord(c) > 255:
-            has_high_char = True
-            break
-    if has_high_char:
-        buf = array.zeros('c', buf_size)
-        kernel = win32.Kernel32.INSTANCE
-        if kernel.GetShortPathName(dir, buf, buf_size):
-            dir = jna.Native.toString(buf.tostring()).rstrip("\0")
-
-    return dir
-
-if system == "win32":
-    try:
-        import win32com.shell
-        _get_win_folder = _get_win_folder_with_pywin32
-    except ImportError:
-        try:
-            from ctypes import windll
-            _get_win_folder = _get_win_folder_with_ctypes
-        except ImportError:
-            try:
-                import com.sun.jna
-                _get_win_folder = _get_win_folder_with_jna
-            except ImportError:
-                _get_win_folder = _get_win_folder_from_registry
-
-
-#---- self test code
-
-if __name__ == "__main__":
-    appname = "MyApp"
-    appauthor = "MyCompany"
-
-    props = ("user_data_dir",
-             "user_config_dir",
-             "user_cache_dir",
-             "user_state_dir",
-             "user_log_dir",
-             "site_data_dir",
-             "site_config_dir")
-
-    print("-- app dirs %s --" % __version__)
-
-    print("-- app dirs (with optional 'version')")
-    dirs = AppDirs(appname, appauthor, version="1.0")
-    for prop in props:
-        print("%s: %s" % (prop, getattr(dirs, prop)))
-
-    print("\n-- app dirs (without optional 'version')")
-    dirs = AppDirs(appname, appauthor)
-    for prop in props:
-        print("%s: %s" % (prop, getattr(dirs, prop)))
-
-    print("\n-- app dirs (without optional 'appauthor')")
-    dirs = AppDirs(appname)
-    for prop in props:
-        print("%s: %s" % (prop, getattr(dirs, prop)))
-
-    print("\n-- app dirs (with disabled 'appauthor')")
-    dirs = AppDirs(appname, appauthor=False)
-    for prop in props:
-        print("%s: %s" % (prop, getattr(dirs, prop)))
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__init__.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__init__.py
deleted file mode 100644
index 34e3a99..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__init__.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""Read resources contained within a package."""
-
-from ._common import (
-    as_file,
-    files,
-    Package,
-)
-
-from ._legacy import (
-    contents,
-    open_binary,
-    read_binary,
-    open_text,
-    read_text,
-    is_resource,
-    path,
-    Resource,
-)
-
-from .abc import ResourceReader
-
-
-__all__ = [
-    'Package',
-    'Resource',
-    'ResourceReader',
-    'as_file',
-    'contents',
-    'files',
-    'is_resource',
-    'open_binary',
-    'open_text',
-    'path',
-    'read_binary',
-    'read_text',
-]
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/__init__.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/__init__.cpython-310.pyc
deleted file mode 100644
index f506f68..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/__init__.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/_adapters.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/_adapters.cpython-310.pyc
deleted file mode 100644
index 9f7d2d4..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/_adapters.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/_common.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/_common.cpython-310.pyc
deleted file mode 100644
index ea91445..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/_common.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/_compat.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/_compat.cpython-310.pyc
deleted file mode 100644
index 710c037..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/_compat.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/_itertools.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/_itertools.cpython-310.pyc
deleted file mode 100644
index 9f54cc7..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/_itertools.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/_legacy.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/_legacy.cpython-310.pyc
deleted file mode 100644
index 667262b..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/_legacy.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/abc.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/abc.cpython-310.pyc
deleted file mode 100644
index f0ac81c..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/abc.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/readers.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/readers.cpython-310.pyc
deleted file mode 100644
index 2a95586..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/readers.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/simple.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/simple.cpython-310.pyc
deleted file mode 100644
index a39e0e3..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/__pycache__/simple.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_adapters.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_adapters.py
deleted file mode 100644
index ea363d8..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_adapters.py
+++ /dev/null
@@ -1,170 +0,0 @@
-from contextlib import suppress
-from io import TextIOWrapper
-
-from . import abc
-
-
-class SpecLoaderAdapter:
-    """
-    Adapt a package spec to adapt the underlying loader.
-    """
-
-    def __init__(self, spec, adapter=lambda spec: spec.loader):
-        self.spec = spec
-        self.loader = adapter(spec)
-
-    def __getattr__(self, name):
-        return getattr(self.spec, name)
-
-
-class TraversableResourcesLoader:
-    """
-    Adapt a loader to provide TraversableResources.
-    """
-
-    def __init__(self, spec):
-        self.spec = spec
-
-    def get_resource_reader(self, name):
-        return CompatibilityFiles(self.spec)._native()
-
-
-def _io_wrapper(file, mode='r', *args, **kwargs):
-    if mode == 'r':
-        return TextIOWrapper(file, *args, **kwargs)
-    elif mode == 'rb':
-        return file
-    raise ValueError(
-        "Invalid mode value '{}', only 'r' and 'rb' are supported".format(mode)
-    )
-
-
-class CompatibilityFiles:
-    """
-    Adapter for an existing or non-existent resource reader
-    to provide a compatibility .files().
-    """
-
-    class SpecPath(abc.Traversable):
-        """
-        Path tied to a module spec.
-        Can be read and exposes the resource reader children.
-        """
-
-        def __init__(self, spec, reader):
-            self._spec = spec
-            self._reader = reader
-
-        def iterdir(self):
-            if not self._reader:
-                return iter(())
-            return iter(
-                CompatibilityFiles.ChildPath(self._reader, path)
-                for path in self._reader.contents()
-            )
-
-        def is_file(self):
-            return False
-
-        is_dir = is_file
-
-        def joinpath(self, other):
-            if not self._reader:
-                return CompatibilityFiles.OrphanPath(other)
-            return CompatibilityFiles.ChildPath(self._reader, other)
-
-        @property
-        def name(self):
-            return self._spec.name
-
-        def open(self, mode='r', *args, **kwargs):
-            return _io_wrapper(self._reader.open_resource(None), mode, *args, **kwargs)
-
-    class ChildPath(abc.Traversable):
-        """
-        Path tied to a resource reader child.
-        Can be read but doesn't expose any meaningful children.
-        """
-
-        def __init__(self, reader, name):
-            self._reader = reader
-            self._name = name
-
-        def iterdir(self):
-            return iter(())
-
-        def is_file(self):
-            return self._reader.is_resource(self.name)
-
-        def is_dir(self):
-            return not self.is_file()
-
-        def joinpath(self, other):
-            return CompatibilityFiles.OrphanPath(self.name, other)
-
-        @property
-        def name(self):
-            return self._name
-
-        def open(self, mode='r', *args, **kwargs):
-            return _io_wrapper(
-                self._reader.open_resource(self.name), mode, *args, **kwargs
-            )
-
-    class OrphanPath(abc.Traversable):
-        """
-        Orphan path, not tied to a module spec or resource reader.
-        Can't be read and doesn't expose any meaningful children.
-        """
-
-        def __init__(self, *path_parts):
-            if len(path_parts) < 1:
-                raise ValueError('Need at least one path part to construct a path')
-            self._path = path_parts
-
-        def iterdir(self):
-            return iter(())
-
-        def is_file(self):
-            return False
-
-        is_dir = is_file
-
-        def joinpath(self, other):
-            return CompatibilityFiles.OrphanPath(*self._path, other)
-
-        @property
-        def name(self):
-            return self._path[-1]
-
-        def open(self, mode='r', *args, **kwargs):
-            raise FileNotFoundError("Can't open orphan path")
-
-    def __init__(self, spec):
-        self.spec = spec
-
-    @property
-    def _reader(self):
-        with suppress(AttributeError):
-            return self.spec.loader.get_resource_reader(self.spec.name)
-
-    def _native(self):
-        """
-        Return the native reader if it supports files().
-        """
-        reader = self._reader
-        return reader if hasattr(reader, 'files') else self
-
-    def __getattr__(self, attr):
-        return getattr(self._reader, attr)
-
-    def files(self):
-        return CompatibilityFiles.SpecPath(self.spec, self._reader)
-
-
-def wrap_spec(package):
-    """
-    Construct a package spec with traversable compatibility
-    on the spec/loader/reader.
-    """
-    return SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader)
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_common.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_common.py
deleted file mode 100644
index a12e2c7..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_common.py
+++ /dev/null
@@ -1,104 +0,0 @@
-import os
-import pathlib
-import tempfile
-import functools
-import contextlib
-import types
-import importlib
-
-from typing import Union, Optional
-from .abc import ResourceReader, Traversable
-
-from ._compat import wrap_spec
-
-Package = Union[types.ModuleType, str]
-
-
-def files(package):
-    # type: (Package) -> Traversable
-    """
-    Get a Traversable resource from a package
-    """
-    return from_package(get_package(package))
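-
-# Editor's note: a hedged sketch (hypothetical package and resource names).
-# The returned Traversable supports joinpath()/'/' and read_text():
-#
-#     >>> files('mypkg').joinpath('data.txt').read_text(encoding='utf-8')
-#     '...contents of data.txt...'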
-
-
-def get_resource_reader(package):
-    # type: (types.ModuleType) -> Optional[ResourceReader]
-    """
-    Return the package's loader if it's a ResourceReader.
-    """
-    # We can't use an issubclass() check here because apparently abc's
-    # __subclasscheck__() hook wants to create a weak reference to the
-    # object, but zipimport.zipimporter does not support weak references,
-    # resulting in a TypeError.  That seems terrible.
-    spec = package.__spec__
-    reader = getattr(spec.loader, 'get_resource_reader', None)  # type: ignore
-    if reader is None:
-        return None
-    return reader(spec.name)  # type: ignore
-
-
-def resolve(cand):
-    # type: (Package) -> types.ModuleType
-    return cand if isinstance(cand, types.ModuleType) else importlib.import_module(cand)
-
-
-def get_package(package):
-    # type: (Package) -> types.ModuleType
-    """Take a package name or module object and return the module.
-
-    Raise an exception if the resolved module is not a package.
-    """
-    resolved = resolve(package)
-    if wrap_spec(resolved).submodule_search_locations is None:
-        raise TypeError(f'{package!r} is not a package')
-    return resolved
-
-
-def from_package(package):
-    """
-    Return a Traversable object for the given package.
-    """
-    spec = wrap_spec(package)
-    reader = spec.loader.get_resource_reader(spec.name)
-    return reader.files()
-
-
-@contextlib.contextmanager
-def _tempfile(reader, suffix=''):
-    # Not using tempfile.NamedTemporaryFile as it leads to deeper 'try'
-    # blocks due to the need to close the temporary file to work on Windows
-    # properly.
-    fd, raw_path = tempfile.mkstemp(suffix=suffix)
-    try:
-        try:
-            os.write(fd, reader())
-        finally:
-            os.close(fd)
-        del reader
-        yield pathlib.Path(raw_path)
-    finally:
-        try:
-            os.remove(raw_path)
-        except FileNotFoundError:
-            pass
-
-
-@functools.singledispatch
-def as_file(path):
-    """
-    Given a Traversable object, return that object as a
-    path on the local file system in a context manager.
-    """
-    return _tempfile(path.read_bytes, suffix=path.name)
-
-
-@as_file.register(pathlib.Path)
-@contextlib.contextmanager
-def _(path):
-    """
-    Degenerate behavior for pathlib.Path objects.
-    """
-    yield path
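-
-# Editor's note: a hedged usage sketch (hypothetical names). as_file()
-# yields a real filesystem path even when the package lives inside a zip;
-# any temporary file it created is removed on exit:
-#
-#     >>> with as_file(files('mypkg') / 'data.bin') as path:
-#     ...     data = path.read_bytes()   # 'path' is a pathlib.Path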
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_compat.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_compat.py
deleted file mode 100644
index cb9fc82..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_compat.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# flake8: noqa
-
-import abc
-import sys
-import pathlib
-from contextlib import suppress
-
-if sys.version_info >= (3, 10):
-    from zipfile import Path as ZipPath  # type: ignore
-else:
-    from ..zipp import Path as ZipPath  # type: ignore
-
-
-try:
-    from typing import runtime_checkable  # type: ignore
-except ImportError:
-
-    def runtime_checkable(cls):  # type: ignore
-        return cls
-
-
-try:
-    from typing import Protocol  # type: ignore
-except ImportError:
-    Protocol = abc.ABC  # type: ignore
-
-
-class TraversableResourcesLoader:
-    """
-    Adapt loaders to provide TraversableResources and other
-    compatibility.
-
-    Used primarily for Python 3.9 and earlier where the native
-    loaders do not yet implement TraversableResources.
-    """
-
-    def __init__(self, spec):
-        self.spec = spec
-
-    @property
-    def path(self):
-        return self.spec.origin
-
-    def get_resource_reader(self, name):
-        from . import readers, _adapters
-
-        def _zip_reader(spec):
-            with suppress(AttributeError):
-                return readers.ZipReader(spec.loader, spec.name)
-
-        def _namespace_reader(spec):
-            with suppress(AttributeError, ValueError):
-                return readers.NamespaceReader(spec.submodule_search_locations)
-
-        def _available_reader(spec):
-            with suppress(AttributeError):
-                return spec.loader.get_resource_reader(spec.name)
-
-        def _native_reader(spec):
-            reader = _available_reader(spec)
-            return reader if hasattr(reader, 'files') else None
-
-        def _file_reader(spec):
-            try:
-                path = pathlib.Path(self.path)
-            except TypeError:
-                return None
-            if path.exists():
-                return readers.FileReader(self)
-
-        return (
-            # native reader if it supplies 'files'
-            _native_reader(self.spec)
-            or
-            # local ZipReader if a zip module
-            _zip_reader(self.spec)
-            or
-            # local NamespaceReader if a namespace module
-            _namespace_reader(self.spec)
-            or
-            # local FileReader
-            _file_reader(self.spec)
-            # fallback - adapt the spec ResourceReader to TraversableReader
-            or _adapters.CompatibilityFiles(self.spec)
-        )
-
-
-def wrap_spec(package):
-    """
-    Construct a package spec with traversable compatibility
-    on the spec/loader/reader.
-
-    Supersedes _adapters.wrap_spec to use TraversableResourcesLoader
-    from above for older Python compatibility (<3.10).
-    """
-    from . import _adapters
-
-    return _adapters.SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader)
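Editor's note: the reader selection above relies on two idioms worth calling out: probes that turn AttributeError into None via `contextlib.suppress`, and an `or`-chain that picks the first probe to succeed. A standalone sketch of the same pattern (all names here are illustrative, not part of the vendored API):

    from contextlib import suppress

    def probe_native(spec):
        # AttributeError means "this loader has no such hook"; the probe
        # falls off the end and implicitly returns None.
        with suppress(AttributeError):
            return spec.loader.get_resource_reader(spec.name)

    def choose_reader(spec, *probes):
        # The first probe returning a non-None reader wins.
        return next(filter(None, (probe(spec) for probe in probes)), None)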
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_itertools.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_itertools.py
deleted file mode 100644
index cce0558..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_itertools.py
+++ /dev/null
@@ -1,35 +0,0 @@
-from itertools import filterfalse
-
-from typing import (
-    Callable,
-    Iterable,
-    Iterator,
-    Optional,
-    Set,
-    TypeVar,
-    Union,
-)
-
-# Type and type variable definitions
-_T = TypeVar('_T')
-_U = TypeVar('_U')
-
-
-def unique_everseen(
-    iterable: Iterable[_T], key: Optional[Callable[[_T], _U]] = None
-) -> Iterator[_T]:
-    "List unique elements, preserving order. Remember all elements ever seen."
-    # unique_everseen('AAAABBBCCDAABBB') --> A B C D
-    # unique_everseen('ABBCcAD', str.lower) --> A B C D
-    seen: Set[Union[_T, _U]] = set()
-    seen_add = seen.add
-    if key is None:
-        for element in filterfalse(seen.__contains__, iterable):
-            seen_add(element)
-            yield element
-    else:
-        for element in iterable:
-            k = key(element)
-            if k not in seen:
-                seen_add(k)
-                yield element
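Editor's note: the doctest comments in the docstring above correspond to calls like the following (assuming `unique_everseen` as defined above, or the identical recipe in the `more_itertools` distribution):

    print(list(unique_everseen('AAAABBBCCDAABBB')))         # ['A', 'B', 'C', 'D']
    print(list(unique_everseen('ABBCcAD', key=str.lower)))  # ['A', 'B', 'C', 'D']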
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_legacy.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_legacy.py
deleted file mode 100644
index 1d5d3f1..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/_legacy.py
+++ /dev/null
@@ -1,121 +0,0 @@
-import functools
-import os
-import pathlib
-import types
-import warnings
-
-from typing import Union, Iterable, ContextManager, BinaryIO, TextIO, Any
-
-from . import _common
-
-Package = Union[types.ModuleType, str]
-Resource = str
-
-
-def deprecated(func):
-    @functools.wraps(func)
-    def wrapper(*args, **kwargs):
-        warnings.warn(
-            f"{func.__name__} is deprecated. Use files() instead. "
-            "Refer to https://importlib-resources.readthedocs.io"
-            "/en/latest/using.html#migrating-from-legacy for migration advice.",
-            DeprecationWarning,
-            stacklevel=2,
-        )
-        return func(*args, **kwargs)
-
-    return wrapper
-
-
-def normalize_path(path):
-    # type: (Any) -> str
-    """Normalize a path by ensuring it is a string.
-
-    If the resulting string contains path separators, an exception is raised.
-    """
-    str_path = str(path)
-    parent, file_name = os.path.split(str_path)
-    if parent:
-        raise ValueError(f'{path!r} must be only a file name')
-    return file_name
-
-
-@deprecated
-def open_binary(package: Package, resource: Resource) -> BinaryIO:
-    """Return a file-like object opened for binary reading of the resource."""
-    return (_common.files(package) / normalize_path(resource)).open('rb')
-
-
-@deprecated
-def read_binary(package: Package, resource: Resource) -> bytes:
-    """Return the binary contents of the resource."""
-    return (_common.files(package) / normalize_path(resource)).read_bytes()
-
-
-@deprecated
-def open_text(
-    package: Package,
-    resource: Resource,
-    encoding: str = 'utf-8',
-    errors: str = 'strict',
-) -> TextIO:
-    """Return a file-like object opened for text reading of the resource."""
-    return (_common.files(package) / normalize_path(resource)).open(
-        'r', encoding=encoding, errors=errors
-    )
-
-
-@deprecated
-def read_text(
-    package: Package,
-    resource: Resource,
-    encoding: str = 'utf-8',
-    errors: str = 'strict',
-) -> str:
-    """Return the decoded string of the resource.
-
-    The decoding-related arguments have the same semantics as those of
-    bytes.decode().
-    """
-    with open_text(package, resource, encoding, errors) as fp:
-        return fp.read()
-
-
-@deprecated
-def contents(package: Package) -> Iterable[str]:
-    """Return an iterable of entries in `package`.
-
-    Note that not all entries are resources.  Specifically, directories are
-    not considered resources.  Use `is_resource()` on each entry returned here
-    to check if it is a resource or not.
-    """
-    return [path.name for path in _common.files(package).iterdir()]
-
-
-@deprecated
-def is_resource(package: Package, name: str) -> bool:
-    """True if `name` is a resource inside `package`.
-
-    Directories are *not* resources.
-    """
-    resource = normalize_path(name)
-    return any(
-        traversable.name == resource and traversable.is_file()
-        for traversable in _common.files(package).iterdir()
-    )
-
-
-@deprecated
-def path(
-    package: Package,
-    resource: Resource,
-) -> ContextManager[pathlib.Path]:
-    """A context manager providing a file path object to the resource.
-
-    If the resource does not already exist on its own on the file system,
-    a temporary file will be created. If the file was created, the file
-    will be deleted upon exiting the context manager (no exception is
-    raised if the file was deleted prior to the context manager
-    exiting).
-    """
-    return _common.as_file(_common.files(package) / normalize_path(resource))
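Editor's note: every helper above warns and defers to files(); the migration the warning points to looks like this (a sketch against the stdlib `importlib.resources`, which hosts the same API on Python 3.9+; 'json' is just a convenient stdlib package to demonstrate against):

    from importlib.resources import files, as_file

    # read_text('json', '__init__.py') becomes:
    text = files('json').joinpath('__init__.py').read_text(encoding='utf-8')

    # path('json', '__init__.py') becomes:
    with as_file(files('json') / '__init__.py') as p:
        print(p.name)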
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/abc.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/abc.py
deleted file mode 100644
index d39dc1a..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/abc.py
+++ /dev/null
@@ -1,137 +0,0 @@
-import abc
-from typing import BinaryIO, Iterable, Text
-
-from ._compat import runtime_checkable, Protocol
-
-
-class ResourceReader(metaclass=abc.ABCMeta):
-    """Abstract base class for loaders to provide resource reading support."""
-
-    @abc.abstractmethod
-    def open_resource(self, resource: Text) -> BinaryIO:
-        """Return an opened, file-like object for binary reading.
-
-        The 'resource' argument is expected to represent only a file name.
-        If the resource cannot be found, FileNotFoundError is raised.
-        """
-        # This deliberately raises FileNotFoundError instead of
-        # NotImplementedError so that if this method is accidentally called,
-        # it'll still do the right thing.
-        raise FileNotFoundError
-
-    @abc.abstractmethod
-    def resource_path(self, resource: Text) -> Text:
-        """Return the file system path to the specified resource.
-
-        The 'resource' argument is expected to represent only a file name.
-        If the resource does not exist on the file system, raise
-        FileNotFoundError.
-        """
-        # This deliberately raises FileNotFoundError instead of
-        # NotImplementedError so that if this method is accidentally called,
-        # it'll still do the right thing.
-        raise FileNotFoundError
-
-    @abc.abstractmethod
-    def is_resource(self, path: Text) -> bool:
-        """Return True if the named 'path' is a resource.
-
-        Files are resources, directories are not.
-        """
-        raise FileNotFoundError
-
-    @abc.abstractmethod
-    def contents(self) -> Iterable[str]:
-        """Return an iterable of entries in `package`."""
-        raise FileNotFoundError
-
-
-@runtime_checkable
-class Traversable(Protocol):
-    """
-    An object with a subset of pathlib.Path methods suitable for
-    traversing directories and opening files.
-    """
-
-    @abc.abstractmethod
-    def iterdir(self):
-        """
-        Yield Traversable objects in self
-        """
-
-    def read_bytes(self):
-        """
-        Read contents of self as bytes
-        """
-        with self.open('rb') as strm:
-            return strm.read()
-
-    def read_text(self, encoding=None):
-        """
-        Read contents of self as text
-        """
-        with self.open(encoding=encoding) as strm:
-            return strm.read()
-
-    @abc.abstractmethod
-    def is_dir(self) -> bool:
-        """
-        Return True if self is a directory
-        """
-
-    @abc.abstractmethod
-    def is_file(self) -> bool:
-        """
-        Return True if self is a file
-        """
-
-    @abc.abstractmethod
-    def joinpath(self, child):
-        """
-        Return Traversable child in self
-        """
-
-    def __truediv__(self, child):
-        """
-        Return Traversable child in self
-        """
-        return self.joinpath(child)
-
-    @abc.abstractmethod
-    def open(self, mode='r', *args, **kwargs):
-        """
-        mode may be 'r' or 'rb' to open as text or binary. Return a handle
-        suitable for reading (same as pathlib.Path.open).
-
-        When opening as text, accepts encoding parameters such as those
-        accepted by io.TextIOWrapper.
-        """
-
-    @abc.abstractproperty
-    def name(self) -> str:
-        """
-        The base name of this object without any parent references.
-        """
-
-
-class TraversableResources(ResourceReader):
-    """
-    The required interface for providing traversable
-    resources.
-    """
-
-    @abc.abstractmethod
-    def files(self):
-        """Return a Traversable object for the loaded package."""
-
-    def open_resource(self, resource):
-        return self.files().joinpath(resource).open('rb')
-
-    def resource_path(self, resource):
-        raise FileNotFoundError(resource)
-
-    def is_resource(self, path):
-        return self.files().joinpath(path).is_file()
-
-    def contents(self):
-        return (item.name for item in self.files().iterdir())
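Editor's note: TraversableResources above reduces a reader to a single abstract method, files(). A minimal sketch of a concrete reader (DirReader is an illustrative name; the base class is importable from the standalone importlib_resources distribution, where this file originates):

    import pathlib
    from importlib_resources.abc import TraversableResources

    class DirReader(TraversableResources):
        """Serve resources straight from a directory on disk."""

        def __init__(self, root):
            self.root = pathlib.Path(root)

        def files(self):
            # open_resource, is_resource and contents are all derived
            # from this one method by the base class.
            return self.root

    print(sorted(DirReader('.').contents())[:3])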
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/readers.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/readers.py
deleted file mode 100644
index f1190ca..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/readers.py
+++ /dev/null
@@ -1,122 +0,0 @@
-import collections
-import pathlib
-import operator
-
-from . import abc
-
-from ._itertools import unique_everseen
-from ._compat import ZipPath
-
-
-def remove_duplicates(items):
-    return iter(collections.OrderedDict.fromkeys(items))
-
-
-class FileReader(abc.TraversableResources):
-    def __init__(self, loader):
-        self.path = pathlib.Path(loader.path).parent
-
-    def resource_path(self, resource):
-        """
-        Return the file system path to prevent
-        `resources.path()` from creating a temporary
-        copy.
-        """
-        return str(self.path.joinpath(resource))
-
-    def files(self):
-        return self.path
-
-
-class ZipReader(abc.TraversableResources):
-    def __init__(self, loader, module):
-        _, _, name = module.rpartition('.')
-        self.prefix = loader.prefix.replace('\\', '/') + name + '/'
-        self.archive = loader.archive
-
-    def open_resource(self, resource):
-        try:
-            return super().open_resource(resource)
-        except KeyError as exc:
-            raise FileNotFoundError(exc.args[0])
-
-    def is_resource(self, path):
-        # workaround for `zipfile.Path.is_file` returning true
-        # for non-existent paths.
-        target = self.files().joinpath(path)
-        return target.is_file() and target.exists()
-
-    def files(self):
-        return ZipPath(self.archive, self.prefix)
-
-
-class MultiplexedPath(abc.Traversable):
-    """
-    Given a series of Traversable objects, implement a merged
-    version of the interface across all objects. Useful for
-    namespace packages which may be multihomed at a single
-    name.
-    """
-
-    def __init__(self, *paths):
-        self._paths = list(map(pathlib.Path, remove_duplicates(paths)))
-        if not self._paths:
-            message = 'MultiplexedPath must contain at least one path'
-            raise FileNotFoundError(message)
-        if not all(path.is_dir() for path in self._paths):
-            raise NotADirectoryError('MultiplexedPath only supports directories')
-
-    def iterdir(self):
-        files = (file for path in self._paths for file in path.iterdir())
-        return unique_everseen(files, key=operator.attrgetter('name'))
-
-    def read_bytes(self):
-        raise FileNotFoundError(f'{self} is not a file')
-
-    def read_text(self, *args, **kwargs):
-        raise FileNotFoundError(f'{self} is not a file')
-
-    def is_dir(self):
-        return True
-
-    def is_file(self):
-        return False
-
-    def joinpath(self, child):
-        # first try to find child in current paths
-        for file in self.iterdir():
-            if file.name == child:
-                return file
-        # if it does not exist, construct it with the first path
-        return self._paths[0] / child
-
-    __truediv__ = joinpath
-
-    def open(self, *args, **kwargs):
-        raise FileNotFoundError(f'{self} is not a file')
-
-    @property
-    def name(self):
-        return self._paths[0].name
-
-    def __repr__(self):
-        paths = ', '.join(f"'{path}'" for path in self._paths)
-        return f'MultiplexedPath({paths})'
-
-
-class NamespaceReader(abc.TraversableResources):
-    def __init__(self, namespace_path):
-        if 'NamespacePath' not in str(namespace_path):
-            raise ValueError('Invalid path')
-        self.path = MultiplexedPath(*list(namespace_path))
-
-    def resource_path(self, resource):
-        """
-        Return the file system path to prevent
-        `resources.path()` from creating a temporary
-        copy.
-        """
-        return str(self.path.joinpath(resource))
-
-    def files(self):
-        return self.path
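Editor's note: MultiplexedPath above merges several directories behind one Traversable. A usage sketch (importing from the standalone importlib_resources distribution; the directories are throwaway temp dirs):

    import pathlib, tempfile
    from importlib_resources.readers import MultiplexedPath

    a, b = (pathlib.Path(tempfile.mkdtemp()) for _ in range(2))
    (a / 'shared.txt').write_text('from a')
    (b / 'only_b.txt').write_text('from b')

    merged = MultiplexedPath(a, b)
    print(sorted(p.name for p in merged.iterdir()))   # ['only_b.txt', 'shared.txt']
    print(merged.joinpath('only_b.txt').read_text())  # from b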
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/simple.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/simple.py
deleted file mode 100644
index da073cb..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/importlib_resources/simple.py
+++ /dev/null
@@ -1,116 +0,0 @@
-"""
-Interface adapters for low-level readers.
-"""
-
-import abc
-import io
-import itertools
-from typing import BinaryIO, List
-
-from .abc import Traversable, TraversableResources
-
-
-class SimpleReader(abc.ABC):
-    """
-    The minimum, low-level interface required from a resource
-    provider.
-    """
-
-    @abc.abstractproperty
-    def package(self):
-        # type: () -> str
-        """
-        The name of the package for which this reader loads resources.
-        """
-
-    @abc.abstractmethod
-    def children(self):
-        # type: () -> List['SimpleReader']
-        """
-        Obtain an iterable of SimpleReader for available
-        child containers (e.g. directories).
-        """
-
-    @abc.abstractmethod
-    def resources(self):
-        # type: () -> List[str]
-        """
-        Obtain available named resources for this virtual package.
-        """
-
-    @abc.abstractmethod
-    def open_binary(self, resource):
-        # type: (str) -> BinaryIO
-        """
-        Obtain a File-like for a named resource.
-        """
-
-    @property
-    def name(self):
-        return self.package.split('.')[-1]
-
-
-class ResourceHandle(Traversable):
-    """
-    Handle to a named resource in a ResourceReader.
-    """
-
-    def __init__(self, parent, name):
-        # type: (ResourceContainer, str) -> None
-        self.parent = parent
-        self.name = name  # type: ignore
-
-    def is_file(self):
-        return True
-
-    def is_dir(self):
-        return False
-
-    def open(self, mode='r', *args, **kwargs):
-        stream = self.parent.reader.open_binary(self.name)
-        if 'b' not in mode:
-            stream = io.TextIOWrapper(stream, *args, **kwargs)
-        return stream
-
-    def joinpath(self, name):
-        raise RuntimeError("Cannot traverse into a resource")
-
-
-class ResourceContainer(Traversable):
-    """
-    Traversable container for a package's resources via its reader.
-    """
-
-    def __init__(self, reader):
-        # type: (SimpleReader) -> None
-        self.reader = reader
-
-    def is_dir(self):
-        return True
-
-    def is_file(self):
-        return False
-
-    def iterdir(self):
-        files = (ResourceHandle(self, name) for name in self.reader.resources())
-        dirs = map(ResourceContainer, self.reader.children())
-        return itertools.chain(files, dirs)
-
-    def open(self, *args, **kwargs):
-        raise IsADirectoryError()
-
-    def joinpath(self, name):
-        return next(
-            traversable for traversable in self.iterdir() if traversable.name == name
-        )
-
-
-class TraversableReader(TraversableResources, SimpleReader):
-    """
-    A TraversableResources based on SimpleReader. Resource providers
-    may derive from this class to provide the TraversableResources
-    interface by supplying the SimpleReader interface.
-    """
-
-    def files(self):
-        return ResourceContainer(self)
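Editor's note: the adapter stack above turns a two-method reader (resources()/open_binary()) into a pathlib-like tree. A self-contained sketch of the same idea, with no dependency on the vendored classes (all names illustrative):

    import io

    class DictReader:
        """In-memory stand-in for SimpleReader."""
        def __init__(self, data):
            self.data = data  # mapping of resource name -> bytes

        def resources(self):
            return list(self.data)

        def open_binary(self, resource):
            return io.BytesIO(self.data[resource])

    class DictContainer:
        """Minimal ResourceContainer analogue over a DictReader."""
        def __init__(self, reader):
            self.reader = reader

        def iterdir(self):
            return iter(self.reader.resources())

        def read_text(self, name, encoding='utf-8'):
            # Same text-mode wrapping that ResourceHandle.open performs.
            with io.TextIOWrapper(self.reader.open_binary(name),
                                  encoding=encoding) as f:
                return f.read()

    box = DictContainer(DictReader({'greeting.txt': b'hello'}))
    print(list(box.iterdir()))            # ['greeting.txt']
    print(box.read_text('greeting.txt'))  # hello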
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/__init__.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/__pycache__/__init__.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/__pycache__/__init__.cpython-310.pyc
deleted file mode 100644
index 327ba33..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/__pycache__/__init__.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/__pycache__/context.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/__pycache__/context.cpython-310.pyc
deleted file mode 100644
index 534d0d5..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/__pycache__/context.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/__pycache__/functools.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/__pycache__/functools.cpython-310.pyc
deleted file mode 100644
index 3714dae..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/__pycache__/functools.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/context.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/context.py
deleted file mode 100644
index 87a4e3d..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/context.py
+++ /dev/null
@@ -1,213 +0,0 @@
-import os
-import subprocess
-import contextlib
-import functools
-import tempfile
-import shutil
-import operator
-
-
-@contextlib.contextmanager
-def pushd(dir):
-    orig = os.getcwd()
-    os.chdir(dir)
-    try:
-        yield dir
-    finally:
-        os.chdir(orig)
-
-
-@contextlib.contextmanager
-def tarball_context(url, target_dir=None, runner=None, pushd=pushd):
-    """
-    Get a tarball, extract it, change to that directory, yield, then
-    clean up.
-    `runner` is the function to invoke commands.
-    `pushd` is a context manager for changing the directory.
-    """
-    if target_dir is None:
-        target_dir = os.path.basename(url).replace('.tar.gz', '').replace('.tgz', '')
-    if runner is None:
-        runner = functools.partial(subprocess.check_call, shell=True)
-    # In the tar command, use --strip-components=1 to strip the first path and
-    #  then
-    #  use -C to cause the files to be extracted to {target_dir}. This ensures
-    #  that we always know where the files were extracted.
-    runner('mkdir {target_dir}'.format(**vars()))
-    try:
-        getter = 'wget {url} -O -'
-        extract = 'tar x{compression} --strip-components=1 -C {target_dir}'
-        cmd = ' | '.join((getter, extract))
-        runner(cmd.format(compression=infer_compression(url), **vars()))
-        with pushd(target_dir):
-            yield target_dir
-    finally:
-        runner('rm -Rf {target_dir}'.format(**vars()))
-
-
-def infer_compression(url):
-    """
-    Given a URL or filename, infer the compression code for tar.
-    """
-    # cheat and just assume it's the last two characters
-    compression_indicator = url[-2:]
-    mapping = dict(gz='z', bz='j', xz='J')
-    # Assume 'z' (gzip) if no match
-    return mapping.get(compression_indicator, 'z')
-
-
-@contextlib.contextmanager
-def temp_dir(remover=shutil.rmtree):
-    """
-    Create a temporary directory context. Pass a custom remover
-    to override the removal behavior.
-    """
-    temp_dir = tempfile.mkdtemp()
-    try:
-        yield temp_dir
-    finally:
-        remover(temp_dir)
-
-
-@contextlib.contextmanager
-def repo_context(url, branch=None, quiet=True, dest_ctx=temp_dir):
-    """
-    Check out the repo indicated by url.
-
-    If dest_ctx is supplied, it should be a context manager
-    to yield the target directory for the check out.
-    """
-    exe = 'git' if 'git' in url else 'hg'
-    with dest_ctx() as repo_dir:
-        cmd = [exe, 'clone', url, repo_dir]
-        if branch:
-            cmd.extend(['--branch', branch])
-        stdout = subprocess.DEVNULL if quiet else None
-        subprocess.check_call(cmd, stdout=stdout)
-        yield repo_dir
-
-
-@contextlib.contextmanager
-def null():
-    yield
-
-
-class ExceptionTrap:
-    """
-    A context manager that will catch certain exceptions and provide an
-    indication they occurred.
-
-    >>> with ExceptionTrap() as trap:
-    ...     raise Exception()
-    >>> bool(trap)
-    True
-
-    >>> with ExceptionTrap() as trap:
-    ...     pass
-    >>> bool(trap)
-    False
-
-    >>> with ExceptionTrap(ValueError) as trap:
-    ...     raise ValueError("1 + 1 is not 3")
-    >>> bool(trap)
-    True
-
-    >>> with ExceptionTrap(ValueError) as trap:
-    ...     raise Exception()
-    Traceback (most recent call last):
-    ...
-    Exception
-
-    >>> bool(trap)
-    False
-    """
-
-    exc_info = None, None, None
-
-    def __init__(self, exceptions=(Exception,)):
-        self.exceptions = exceptions
-
-    def __enter__(self):
-        return self
-
-    @property
-    def type(self):
-        return self.exc_info[0]
-
-    @property
-    def value(self):
-        return self.exc_info[1]
-
-    @property
-    def tb(self):
-        return self.exc_info[2]
-
-    def __exit__(self, *exc_info):
-        type = exc_info[0]
-        matches = type and issubclass(type, self.exceptions)
-        if matches:
-            self.exc_info = exc_info
-        return matches
-
-    def __bool__(self):
-        return bool(self.type)
-
-    def raises(self, func, *, _test=bool):
-        """
-        Wrap func and replace the result with the truth
-        value of the trap (True if an exception occurred).
-
-        First, give the decorator an alias, since the restricted decorator
-        syntax of Python 3.8 and earlier does not allow this expression
-        to be used directly.
-
-        >>> raises = ExceptionTrap(ValueError).raises
-
-        Now decorate a function that always fails.
-
-        >>> @raises
-        ... def fail():
-        ...     raise ValueError('failed')
-        >>> fail()
-        True
-        """
-
-        @functools.wraps(func)
-        def wrapper(*args, **kwargs):
-            with ExceptionTrap(self.exceptions) as trap:
-                func(*args, **kwargs)
-            return _test(trap)
-
-        return wrapper
-
-    def passes(self, func):
-        """
-        Wrap func and replace the result with the truth
-        value of the trap (True if no exception).
-
-        First, give the decorator an alias, since the restricted decorator
-        syntax of Python 3.8 and earlier does not allow this expression
-        to be used directly.
-
-        >>> passes = ExceptionTrap(ValueError).passes
-
-        Now decorate a function that always fails.
-
-        >>> @passes
-        ... def fail():
-        ...     raise ValueError('failed')
-
-        >>> fail()
-        False
-        """
-        return self.raises(func, _test=operator.not_)
-
-
-class suppress(contextlib.suppress, contextlib.ContextDecorator):
-    """
-    A version of contextlib.suppress with decorator support.
-
-    >>> @suppress(KeyError)
-    ... def key_error():
-    ...     {}['']
-    >>> key_error()
-    """
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/functools.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/functools.py
deleted file mode 100644
index a3fea3a..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/functools.py
+++ /dev/null
@@ -1,525 +0,0 @@
-import functools
-import time
-import inspect
-import collections
-import types
-import itertools
-
-from pkg_resources.extern import more_itertools
-
-from typing import Callable, TypeVar
-
-
-CallableT = TypeVar("CallableT", bound=Callable[..., object])
-
-
-def compose(*funcs):
-    """
-    Compose any number of unary functions into a single unary function.
-
-    >>> import textwrap
-    >>> expected = str.strip(textwrap.dedent(compose.__doc__))
-    >>> strip_and_dedent = compose(str.strip, textwrap.dedent)
-    >>> strip_and_dedent(compose.__doc__) == expected
-    True
-
-    Compose also allows the innermost function to take arbitrary arguments.
-
-    >>> round_three = lambda x: round(x, ndigits=3)
-    >>> f = compose(round_three, int.__truediv__)
-    >>> [f(3*x, x+1) for x in range(1,10)]
-    [1.5, 2.0, 2.25, 2.4, 2.5, 2.571, 2.625, 2.667, 2.7]
-    """
-
-    def compose_two(f1, f2):
-        return lambda *args, **kwargs: f1(f2(*args, **kwargs))
-
-    return functools.reduce(compose_two, funcs)
-
-
-def method_caller(method_name, *args, **kwargs):
-    """
-    Return a function that will call a named method on the
-    target object with optional positional and keyword
-    arguments.
-
-    >>> lower = method_caller('lower')
-    >>> lower('MyString')
-    'mystring'
-    """
-
-    def call_method(target):
-        func = getattr(target, method_name)
-        return func(*args, **kwargs)
-
-    return call_method
-
-
-def once(func):
-    """
-    Decorate func so it's only ever called the first time.
-
-    Calling the wrapper again returns the saved result, so an expensive or
-    non-idempotent function becomes cheap and idempotent after the first call.
-
-    >>> add_three = once(lambda a: a+3)
-    >>> add_three(3)
-    6
-    >>> add_three(9)
-    6
-    >>> add_three('12')
-    6
-
-    To reset the stored value, simply clear the property ``saved_result``.
-
-    >>> del add_three.saved_result
-    >>> add_three(9)
-    12
-    >>> add_three(8)
-    12
-
-    Or invoke 'reset()' on it.
-
-    >>> add_three.reset()
-    >>> add_three(-3)
-    0
-    >>> add_three(0)
-    0
-    """
-
-    @functools.wraps(func)
-    def wrapper(*args, **kwargs):
-        if not hasattr(wrapper, 'saved_result'):
-            wrapper.saved_result = func(*args, **kwargs)
-        return wrapper.saved_result
-
-    wrapper.reset = lambda: vars(wrapper).__delitem__('saved_result')
-    return wrapper
-
-
-def method_cache(
-    method: CallableT,
-    cache_wrapper: Callable[
-        [CallableT], CallableT
-    ] = functools.lru_cache(),  # type: ignore[assignment]
-) -> CallableT:
-    """
-    Wrap lru_cache to support storing the cache data in the object instances.
-
-    Abstracts the common paradigm where the method explicitly saves an
-    underscore-prefixed protected property on first call and returns that
-    subsequently.
-
-    >>> class MyClass:
-    ...     calls = 0
-    ...
-    ...     @method_cache
-    ...     def method(self, value):
-    ...         self.calls += 1
-    ...         return value
-
-    >>> a = MyClass()
-    >>> a.method(3)
-    3
-    >>> for x in range(75):
-    ...     res = a.method(x)
-    >>> a.calls
-    75
-
-    Note that the apparent behavior will be exactly like that of lru_cache
-    except that the cache is stored on each instance, so values in one
-    instance will not flush values from another, and when an instance is
-    deleted, so are the cached values for that instance.
-
-    >>> b = MyClass()
-    >>> for x in range(35):
-    ...     res = b.method(x)
-    >>> b.calls
-    35
-    >>> a.method(0)
-    0
-    >>> a.calls
-    75
-
-    Note that if method had been decorated with ``functools.lru_cache()``,
-    a.calls would have been 76 (due to the cached value of 0 having been
-    flushed by the 'b' instance).
-
-    Clear the cache with ``.cache_clear()``
-
-    >>> a.method.cache_clear()
-
-    Same for a method that hasn't yet been called.
-
-    >>> c = MyClass()
-    >>> c.method.cache_clear()
-
-    Another cache wrapper may be supplied:
-
-    >>> cache = functools.lru_cache(maxsize=2)
-    >>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache)
-    >>> a = MyClass()
-    >>> a.method2()
-    3
-
-    Caution - do not subsequently wrap the method with another decorator, such
-    as ``@property``, which changes the semantics of the function.
-
-    See also
-    http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/
-    for another implementation and additional justification.
-    """
-
-    def wrapper(self: object, *args: object, **kwargs: object) -> object:
-        # it's the first call, replace the method with a cached, bound method
-        bound_method: CallableT = types.MethodType(  # type: ignore[assignment]
-            method, self
-        )
-        cached_method = cache_wrapper(bound_method)
-        setattr(self, method.__name__, cached_method)
-        return cached_method(*args, **kwargs)
-
-    # Support cache clear even before cache has been created.
-    wrapper.cache_clear = lambda: None  # type: ignore[attr-defined]
-
-    return (  # type: ignore[return-value]
-        _special_method_cache(method, cache_wrapper) or wrapper
-    )
-
-
-def _special_method_cache(method, cache_wrapper):
-    """
-    Because Python treats special methods differently, it's not
-    possible to use instance attributes to implement the cached
-    methods.
-
-    Instead, install the wrapper method under a different name
-    and return a simple proxy to that wrapper.
-
-    https://github.com/jaraco/jaraco.functools/issues/5
-    """
-    name = method.__name__
-    special_names = '__getattr__', '__getitem__'
-    if name not in special_names:
-        return
-
-    wrapper_name = '__cached' + name
-
-    def proxy(self, *args, **kwargs):
-        if wrapper_name not in vars(self):
-            bound = types.MethodType(method, self)
-            cache = cache_wrapper(bound)
-            setattr(self, wrapper_name, cache)
-        else:
-            cache = getattr(self, wrapper_name)
-        return cache(*args, **kwargs)
-
-    return proxy
-
-
-def apply(transform):
-    """
-    Decorate a function with a transform function that is
-    invoked on results returned from the decorated function.
-
-    >>> @apply(reversed)
-    ... def get_numbers(start):
-    ...     "doc for get_numbers"
-    ...     return range(start, start+3)
-    >>> list(get_numbers(4))
-    [6, 5, 4]
-    >>> get_numbers.__doc__
-    'doc for get_numbers'
-    """
-
-    def wrap(func):
-        return functools.wraps(func)(compose(transform, func))
-
-    return wrap
-
-
-def result_invoke(action):
-    r"""
-    Decorate a function with an action function that is
-    invoked on the results returned from the decorated
-    function (for its side-effect), then return the original
-    result.
-
-    >>> @result_invoke(print)
-    ... def add_two(a, b):
-    ...     return a + b
-    >>> x = add_two(2, 3)
-    5
-    >>> x
-    5
-    """
-
-    def wrap(func):
-        @functools.wraps(func)
-        def wrapper(*args, **kwargs):
-            result = func(*args, **kwargs)
-            action(result)
-            return result
-
-        return wrapper
-
-    return wrap
-
-
-def call_aside(f, *args, **kwargs):
-    """
-    Call a function for its side effect after initialization.
-
-    >>> @call_aside
-    ... def func(): print("called")
-    called
-    >>> func()
-    called
-
-    Use functools.partial to pass parameters to the initial call
-
-    >>> @functools.partial(call_aside, name='bingo')
-    ... def func(name): print("called with", name)
-    called with bingo
-    """
-    f(*args, **kwargs)
-    return f
-
-
-class Throttler:
-    """
-    Rate-limit a function (or other callable)
-    """
-
-    def __init__(self, func, max_rate=float('Inf')):
-        if isinstance(func, Throttler):
-            func = func.func
-        self.func = func
-        self.max_rate = max_rate
-        self.reset()
-
-    def reset(self):
-        self.last_called = 0
-
-    def __call__(self, *args, **kwargs):
-        self._wait()
-        return self.func(*args, **kwargs)
-
-    def _wait(self):
-        "ensure at least 1/max_rate seconds from last call"
-        elapsed = time.time() - self.last_called
-        must_wait = 1 / self.max_rate - elapsed
-        time.sleep(max(0, must_wait))
-        self.last_called = time.time()
-
-    def __get__(self, obj, type=None):
-        return first_invoke(self._wait, functools.partial(self.func, obj))
-
-
-def first_invoke(func1, func2):
-    """
-    Return a function that when invoked will invoke func1 without
-    any parameters (for its side-effect) and then invoke func2
-    with whatever parameters were passed, returning its result.
-    """
-
-    def wrapper(*args, **kwargs):
-        func1()
-        return func2(*args, **kwargs)
-
-    return wrapper
-
-
-def retry_call(func, cleanup=lambda: None, retries=0, trap=()):
-    """
-    Given a callable func, trap the indicated exceptions
-    for up to 'retries' times, invoking cleanup on the
-    exception. On the final attempt, allow any exceptions
-    to propagate.
-    """
-    attempts = itertools.count() if retries == float('inf') else range(retries)
-    for attempt in attempts:
-        try:
-            return func()
-        except trap:
-            cleanup()
-
-    return func()
-
-
-def retry(*r_args, **r_kwargs):
-    """
-    Decorator wrapper for retry_call. Accepts arguments to retry_call
-    except func and then returns a decorator for the decorated function.
-
-    Ex:
-
-    >>> @retry(retries=3)
-    ... def my_func(a, b):
-    ...     "this is my funk"
-    ...     print(a, b)
-    >>> my_func.__doc__
-    'this is my funk'
-    """
-
-    def decorate(func):
-        @functools.wraps(func)
-        def wrapper(*f_args, **f_kwargs):
-            bound = functools.partial(func, *f_args, **f_kwargs)
-            return retry_call(bound, *r_args, **r_kwargs)
-
-        return wrapper
-
-    return decorate
-
-
-def print_yielded(func):
-    """
-    Convert a generator into a function that prints all yielded elements
-
-    >>> @print_yielded
-    ... def x():
-    ...     yield 3; yield None
-    >>> x()
-    3
-    None
-    """
-    print_all = functools.partial(map, print)
-    print_results = compose(more_itertools.consume, print_all, func)
-    return functools.wraps(func)(print_results)
-
-
-def pass_none(func):
-    """
-    Wrap func so it's not called if its first param is None
-
-    >>> print_text = pass_none(print)
-    >>> print_text('text')
-    text
-    >>> print_text(None)
-    """
-
-    @functools.wraps(func)
-    def wrapper(param, *args, **kwargs):
-        if param is not None:
-            return func(param, *args, **kwargs)
-
-    return wrapper
-
-
-def assign_params(func, namespace):
-    """
-    Assign parameters from namespace where func solicits.
-
-    >>> def func(x, y=3):
-    ...     print(x, y)
-    >>> assigned = assign_params(func, dict(x=2, z=4))
-    >>> assigned()
-    2 3
-
-    The usual errors are raised if a function doesn't receive
-    its required parameters:
-
-    >>> assigned = assign_params(func, dict(y=3, z=4))
-    >>> assigned()
-    Traceback (most recent call last):
-    TypeError: func() ...argument...
-
-    It even works on methods:
-
-    >>> class Handler:
-    ...     def meth(self, arg):
-    ...         print(arg)
-    >>> assign_params(Handler().meth, dict(arg='crystal', foo='clear'))()
-    crystal
-    """
-    sig = inspect.signature(func)
-    params = sig.parameters.keys()
-    call_ns = {k: namespace[k] for k in params if k in namespace}
-    return functools.partial(func, **call_ns)
-
-
-def save_method_args(method):
-    """
-    Wrap a method such that when it is called, the args and kwargs are
-    saved on the method.
-
-    >>> class MyClass:
-    ...     @save_method_args
-    ...     def method(self, a, b):
-    ...         print(a, b)
-    >>> my_ob = MyClass()
-    >>> my_ob.method(1, 2)
-    1 2
-    >>> my_ob._saved_method.args
-    (1, 2)
-    >>> my_ob._saved_method.kwargs
-    {}
-    >>> my_ob.method(a=3, b='foo')
-    3 foo
-    >>> my_ob._saved_method.args
-    ()
-    >>> my_ob._saved_method.kwargs == dict(a=3, b='foo')
-    True
-
-    The arguments are stored on the instance, allowing for
-    different instances to save different args.
-
-    >>> your_ob = MyClass()
-    >>> your_ob.method({str('x'): 3}, b=[4])
-    {'x': 3} [4]
-    >>> your_ob._saved_method.args
-    ({'x': 3},)
-    >>> my_ob._saved_method.args
-    ()
-    """
-    args_and_kwargs = collections.namedtuple('args_and_kwargs', 'args kwargs')
-
-    @functools.wraps(method)
-    def wrapper(self, *args, **kwargs):
-        attr_name = '_saved_' + method.__name__
-        attr = args_and_kwargs(args, kwargs)
-        setattr(self, attr_name, attr)
-        return method(self, *args, **kwargs)
-
-    return wrapper
-
-
-def except_(*exceptions, replace=None, use=None):
-    """
-    Replace the indicated exceptions, if raised, with the indicated
-    literal replacement or evaluated expression (if present).
-
-    >>> safe_int = except_(ValueError)(int)
-    >>> safe_int('five')
-    >>> safe_int('5')
-    5
-
-    Specify a literal replacement with ``replace``.
-
-    >>> safe_int_r = except_(ValueError, replace=0)(int)
-    >>> safe_int_r('five')
-    0
-
-    Provide an expression to ``use`` to pass through particular parameters.
-
-    >>> safe_int_pt = except_(ValueError, use='args[0]')(int)
-    >>> safe_int_pt('five')
-    'five'
-
-    """
-
-    def decorate(func):
-        @functools.wraps(func)
-        def wrapper(*args, **kwargs):
-            try:
-                return func(*args, **kwargs)
-            except exceptions:
-                try:
-                    return eval(use)
-                except TypeError:
-                    return replace
-
-        return wrapper
-
-    return decorate
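Editor's note: a short usage sketch for a few of the helpers above, assuming the standalone jaraco.functools distribution on PyPI:

    from jaraco.functools import compose, once, retry

    strip_lower = compose(str.strip, str.lower)   # innermost function runs first
    print(strip_lower('  MiXeD  '))               # mixed

    @once
    def setup():
        print('initializing')

    setup(); setup()                              # prints 'initializing' once

    @retry(retries=2, trap=(ConnectionError,))
    def flaky_fetch():
        ...  # retried up to twice on ConnectionError, then errors propagate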
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/text/__init__.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/text/__init__.py
deleted file mode 100644
index c466378..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/text/__init__.py
+++ /dev/null
@@ -1,599 +0,0 @@
-import re
-import itertools
-import textwrap
-import functools
-
-try:
-    from importlib.resources import files  # type: ignore
-except ImportError:  # pragma: nocover
-    from pkg_resources.extern.importlib_resources import files  # type: ignore
-
-from pkg_resources.extern.jaraco.functools import compose, method_cache
-from pkg_resources.extern.jaraco.context import ExceptionTrap
-
-
-def substitution(old, new):
-    """
-    Return a function that will perform a substitution on a string
-    """
-    return lambda s: s.replace(old, new)
-
-
-def multi_substitution(*substitutions):
-    """
-    Take a sequence of pairs specifying substitutions, and create
-    a function that performs those substitutions.
-
-    >>> multi_substitution(('foo', 'bar'), ('bar', 'baz'))('foo')
-    'baz'
-    """
-    substitutions = itertools.starmap(substitution, substitutions)
-    # compose function applies last function first, so reverse the
-    #  substitutions to get the expected order.
-    substitutions = reversed(tuple(substitutions))
-    return compose(*substitutions)
-
-
-class FoldedCase(str):
-    """
-    A case insensitive string class; behaves just like str
-    except compares equal when the only variation is case.
-
-    >>> s = FoldedCase('hello world')
-
-    >>> s == 'Hello World'
-    True
-
-    >>> 'Hello World' == s
-    True
-
-    >>> s != 'Hello World'
-    False
-
-    >>> s.index('O')
-    4
-
-    >>> s.split('O')
-    ['hell', ' w', 'rld']
-
-    >>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta']))
-    ['alpha', 'Beta', 'GAMMA']
-
-    Sequence membership is straightforward.
-
-    >>> "Hello World" in [s]
-    True
-    >>> s in ["Hello World"]
-    True
-
-    You may test for set inclusion, but candidate and elements
-    must both be folded.
-
-    >>> FoldedCase("Hello World") in {s}
-    True
-    >>> s in {FoldedCase("Hello World")}
-    True
-
-    String inclusion works as long as the FoldedCase object
-    is on the right.
-
-    >>> "hello" in FoldedCase("Hello World")
-    True
-
-    But not if the FoldedCase object is on the left:
-
-    >>> FoldedCase('hello') in 'Hello World'
-    False
-
-    In that case, use ``in_``:
-
-    >>> FoldedCase('hello').in_('Hello World')
-    True
-
-    >>> FoldedCase('hello') > FoldedCase('Hello')
-    False
-    """
-
-    def __lt__(self, other):
-        return self.lower() < other.lower()
-
-    def __gt__(self, other):
-        return self.lower() > other.lower()
-
-    def __eq__(self, other):
-        return self.lower() == other.lower()
-
-    def __ne__(self, other):
-        return self.lower() != other.lower()
-
-    def __hash__(self):
-        return hash(self.lower())
-
-    def __contains__(self, other):
-        return super().lower().__contains__(other.lower())
-
-    def in_(self, other):
-        "Does self appear in other?"
-        return self in FoldedCase(other)
-
-    # cache lower since it's likely to be called frequently.
-    @method_cache
-    def lower(self):
-        return super().lower()
-
-    def index(self, sub):
-        return self.lower().index(sub.lower())
-
-    def split(self, splitter=' ', maxsplit=0):
-        pattern = re.compile(re.escape(splitter), re.I)
-        return pattern.split(self, maxsplit)
-
-
-# Python 3.8 compatibility
-_unicode_trap = ExceptionTrap(UnicodeDecodeError)
-
-
-@_unicode_trap.passes
-def is_decodable(value):
-    r"""
-    Return True if the supplied value is decodable (using the default
-    encoding).
-
-    >>> is_decodable(b'\xff')
-    False
-    >>> is_decodable(b'\x32')
-    True
-    """
-    value.decode()
-
-
-def is_binary(value):
-    r"""
-    Return True if the value appears to be binary (that is, it's a byte
-    string and isn't decodable).
-
-    >>> is_binary(b'\xff')
-    True
-    >>> is_binary('\xff')
-    False
-    """
-    return isinstance(value, bytes) and not is_decodable(value)
-
-
-def trim(s):
-    r"""
-    Trim something like a docstring to remove the whitespace that
-    is common due to indentation and formatting.
-
-    >>> trim("\n\tfoo = bar\n\t\tbar = baz\n")
-    'foo = bar\n\tbar = baz'
-    """
-    return textwrap.dedent(s).strip()
-
-
-def wrap(s):
-    """
-    Wrap lines of text, retaining existing newlines as
-    paragraph markers.
-
-    >>> print(wrap(lorem_ipsum))
-    Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do
-    eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad
-    minim veniam, quis nostrud exercitation ullamco laboris nisi ut
-    aliquip ex ea commodo consequat. Duis aute irure dolor in
-    reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla
-    pariatur. Excepteur sint occaecat cupidatat non proident, sunt in
-    culpa qui officia deserunt mollit anim id est laborum.
-    <BLANKLINE>
-    Curabitur pretium tincidunt lacus. Nulla gravida orci a odio. Nullam
-    varius, turpis et commodo pharetra, est eros bibendum elit, nec luctus
-    magna felis sollicitudin mauris. Integer in mauris eu nibh euismod
-    gravida. Duis ac tellus et risus vulputate vehicula. Donec lobortis
-    risus a elit. Etiam tempor. Ut ullamcorper, ligula eu tempor congue,
-    eros est euismod turpis, id tincidunt sapien risus a quam. Maecenas
-    fermentum consequat mi. Donec fermentum. Pellentesque malesuada nulla
-    a mi. Duis sapien sem, aliquet nec, commodo eget, consequat quis,
-    neque. Aliquam faucibus, elit ut dictum aliquet, felis nisl adipiscing
-    sapien, sed malesuada diam lacus eget erat. Cras mollis scelerisque
-    nunc. Nullam arcu. Aliquam consequat. Curabitur augue lorem, dapibus
-    quis, laoreet et, pretium ac, nisi. Aenean magna nisl, mollis quis,
-    molestie eu, feugiat in, orci. In hac habitasse platea dictumst.
-    """
-    paragraphs = s.splitlines()
-    wrapped = ('\n'.join(textwrap.wrap(para)) for para in paragraphs)
-    return '\n\n'.join(wrapped)
-
-
-def unwrap(s):
-    r"""
-    Given a multi-line string, return an unwrapped version.
-
-    >>> wrapped = wrap(lorem_ipsum)
-    >>> wrapped.count('\n')
-    20
-    >>> unwrapped = unwrap(wrapped)
-    >>> unwrapped.count('\n')
-    1
-    >>> print(unwrapped)
-    Lorem ipsum dolor sit amet, consectetur adipiscing ...
-    Curabitur pretium tincidunt lacus. Nulla gravida orci ...
-
-    """
-    paragraphs = re.split(r'\n\n+', s)
-    cleaned = (para.replace('\n', ' ') for para in paragraphs)
-    return '\n'.join(cleaned)
-
-
-
-class Splitter(object):
-    """object that will split a string with the given arguments for each call
-
-    >>> s = Splitter(',')
-    >>> s('hello, world, this is your, master calling')
-    ['hello', ' world', ' this is your', ' master calling']
-    """
-
-    def __init__(self, *args):
-        self.args = args
-
-    def __call__(self, s):
-        return s.split(*self.args)
-
-
-def indent(string, prefix=' ' * 4):
-    """
-    >>> indent('foo')
-    '    foo'
-    """
-    return prefix + string
-
-
-class WordSet(tuple):
-    """
-    Given an identifier, return the words that identifier represents,
-    whether in camel case, underscore-separated, etc.
-
-    >>> WordSet.parse("camelCase")
-    ('camel', 'Case')
-
-    >>> WordSet.parse("under_sep")
-    ('under', 'sep')
-
-    Acronyms should be retained
-
-    >>> WordSet.parse("firstSNL")
-    ('first', 'SNL')
-
-    >>> WordSet.parse("you_and_I")
-    ('you', 'and', 'I')
-
-    >>> WordSet.parse("A simple test")
-    ('A', 'simple', 'test')
-
-    Multiple caps should not interfere with the first cap of another word.
-
-    >>> WordSet.parse("myABCClass")
-    ('my', 'ABC', 'Class')
-
-    The result is a WordSet, so you can get the form you need.
-
-    >>> WordSet.parse("myABCClass").underscore_separated()
-    'my_ABC_Class'
-
-    >>> WordSet.parse('a-command').camel_case()
-    'ACommand'
-
-    >>> WordSet.parse('someIdentifier').lowered().space_separated()
-    'some identifier'
-
-    Slices of the result should return another WordSet.
-
-    >>> WordSet.parse('taken-out-of-context')[1:].underscore_separated()
-    'out_of_context'
-
-    >>> WordSet.from_class_name(WordSet()).lowered().space_separated()
-    'word set'
-
-    >>> example = WordSet.parse('figured it out')
-    >>> example.headless_camel_case()
-    'figuredItOut'
-    >>> example.dash_separated()
-    'figured-it-out'
-
-    """
-
-    _pattern = re.compile('([A-Z]?[a-z]+)|([A-Z]+(?![a-z]))')
-
-    def capitalized(self):
-        return WordSet(word.capitalize() for word in self)
-
-    def lowered(self):
-        return WordSet(word.lower() for word in self)
-
-    def camel_case(self):
-        return ''.join(self.capitalized())
-
-    def headless_camel_case(self):
-        words = iter(self)
-        first = next(words).lower()
-        new_words = itertools.chain((first,), WordSet(words).camel_case())
-        return ''.join(new_words)
-
-    def underscore_separated(self):
-        return '_'.join(self)
-
-    def dash_separated(self):
-        return '-'.join(self)
-
-    def space_separated(self):
-        return ' '.join(self)
-
-    def trim_right(self, item):
-        """
-        Remove the item from the end of the set.
-
-        >>> WordSet.parse('foo bar').trim_right('foo')
-        ('foo', 'bar')
-        >>> WordSet.parse('foo bar').trim_right('bar')
-        ('foo',)
-        >>> WordSet.parse('').trim_right('bar')
-        ()
-        """
-        return self[:-1] if self and self[-1] == item else self
-
-    def trim_left(self, item):
-        """
-        Remove the item from the beginning of the set.
-
-        >>> WordSet.parse('foo bar').trim_left('foo')
-        ('bar',)
-        >>> WordSet.parse('foo bar').trim_left('bar')
-        ('foo', 'bar')
-        >>> WordSet.parse('').trim_left('bar')
-        ()
-        """
-        return self[1:] if self and self[0] == item else self
-
-    def trim(self, item):
-        """
-        >>> WordSet.parse('foo bar').trim('foo')
-        ('bar',)
-        """
-        return self.trim_left(item).trim_right(item)
-
-    def __getitem__(self, item):
-        result = super(WordSet, self).__getitem__(item)
-        if isinstance(item, slice):
-            result = WordSet(result)
-        return result
-
-    @classmethod
-    def parse(cls, identifier):
-        matches = cls._pattern.finditer(identifier)
-        return WordSet(match.group(0) for match in matches)
-
-    @classmethod
-    def from_class_name(cls, subject):
-        return cls.parse(subject.__class__.__name__)
-
-
-# for backward compatibility
-words = WordSet.parse
-
-
-def simple_html_strip(s):
-    r"""
-    Remove HTML from the string `s`.
-
-    >>> str(simple_html_strip(''))
-    ''
-
-    >>> print(simple_html_strip('A <bold>stormy</bold> day in paradise'))
-    A stormy day in paradise
-
-    >>> print(simple_html_strip('Somebody <!-- do not --> tell the truth.'))
-    Somebody  tell the truth.
-
-    >>> print(simple_html_strip('What about<br/>\nmultiple lines?'))
-    What about
-    multiple lines?
-    """
-    html_stripper = re.compile('(<!--.*?-->)|(<[^>]*>)|([^<]+)', re.DOTALL)
-    texts = (match.group(3) or '' for match in html_stripper.finditer(s))
-    return ''.join(texts)
-
-
-class SeparatedValues(str):
-    """
-    A string separated by a separator. Overrides __iter__ for getting
-    the values.
-
-    >>> list(SeparatedValues('a,b,c'))
-    ['a', 'b', 'c']
-
-    Whitespace is stripped and empty values are discarded.
-
-    >>> list(SeparatedValues(' a,   b   , c,  '))
-    ['a', 'b', 'c']
-    """
-
-    separator = ','
-
-    def __iter__(self):
-        parts = self.split(self.separator)
-        return filter(None, (part.strip() for part in parts))
-
-
-class Stripper:
-    r"""
-    Given a series of lines, find the common prefix and strip it from them.
-
-    >>> lines = [
-    ...     'abcdefg\n',
-    ...     'abc\n',
-    ...     'abcde\n',
-    ... ]
-    >>> res = Stripper.strip_prefix(lines)
-    >>> res.prefix
-    'abc'
-    >>> list(res.lines)
-    ['defg\n', '\n', 'de\n']
-
-    If no prefix is common, nothing should be stripped.
-
-    >>> lines = [
-    ...     'abcd\n',
-    ...     '1234\n',
-    ... ]
-    >>> res = Stripper.strip_prefix(lines)
-    >>> res.prefix
-    ''
-    >>> list(res.lines)
-    ['abcd\n', '1234\n']
-    """
-
-    def __init__(self, prefix, lines):
-        self.prefix = prefix
-        self.lines = map(self, lines)
-
-    @classmethod
-    def strip_prefix(cls, lines):
-        prefix_lines, lines = itertools.tee(lines)
-        prefix = functools.reduce(cls.common_prefix, prefix_lines)
-        return cls(prefix, lines)
-
-    def __call__(self, line):
-        if not self.prefix:
-            return line
-        null, prefix, rest = line.partition(self.prefix)
-        return rest
-
-    @staticmethod
-    def common_prefix(s1, s2):
-        """
-        Return the common prefix of two lines.
-        """
-        index = min(len(s1), len(s2))
-        while s1[:index] != s2[:index]:
-            index -= 1
-        return s1[:index]
-
-
-def remove_prefix(text, prefix):
-    """
-    Remove the prefix from the text if it exists.
-
-    >>> remove_prefix('underwhelming performance', 'underwhelming ')
-    'performance'
-
-    >>> remove_prefix('something special', 'sample')
-    'something special'
-    """
-    # rpartition would also strip through a *later* occurrence of the
-    # prefix; only strip when the text actually starts with it.
-    return text[len(prefix):] if text.startswith(prefix) else text
-
-
-def remove_suffix(text, suffix):
-    """
-    Remove the suffix from the text if it exists.
-
-    >>> remove_suffix('name.git', '.git')
-    'name'
-
-    >>> remove_suffix('something special', 'sample')
-    'something special'
-    """
-    # partition would cut at the *first* occurrence of the suffix; only
-    # strip when the text actually ends with it.
-    return text[:-len(suffix)] if suffix and text.endswith(suffix) else text
-
-
-def normalize_newlines(text):
-    r"""
-    Replace alternate newlines with the canonical newline.
-
-    >>> normalize_newlines('Lorem Ipsum\u2029')
-    'Lorem Ipsum\n'
-    >>> normalize_newlines('Lorem Ipsum\r\n')
-    'Lorem Ipsum\n'
-    >>> normalize_newlines('Lorem Ipsum\x85')
-    'Lorem Ipsum\n'
-    """
-    newlines = ['\r\n', '\r', '\n', '\u0085', '\u2028', '\u2029']
-    pattern = '|'.join(newlines)
-    return re.sub(pattern, '\n', text)
-
-
-def _nonblank(str):
-    return str and not str.startswith('#')
-
-
-@functools.singledispatch
-def yield_lines(iterable):
-    r"""
-    Yield valid lines of a string or iterable.
-
-    >>> list(yield_lines(''))
-    []
-    >>> list(yield_lines(['foo', 'bar']))
-    ['foo', 'bar']
-    >>> list(yield_lines('foo\nbar'))
-    ['foo', 'bar']
-    >>> list(yield_lines('\nfoo\n#bar\nbaz #comment'))
-    ['foo', 'baz #comment']
-    >>> list(yield_lines(['foo\nbar', 'baz', 'bing\n\n\n']))
-    ['foo', 'bar', 'baz', 'bing']
-    """
-    return itertools.chain.from_iterable(map(yield_lines, iterable))
-
-
-@yield_lines.register(str)
-def _(text):
-    return filter(_nonblank, map(str.strip, text.splitlines()))
-
-
-def drop_comment(line):
-    """
-    Drop comments.
-
-    >>> drop_comment('foo # bar')
-    'foo'
-
-    A hash without a space may be in a URL.
-
-    >>> drop_comment('http://example.com/foo#bar')
-    'http://example.com/foo#bar'
-    """
-    return line.partition(' #')[0]
-
-
-def join_continuation(lines):
-    r"""
-    Join lines continued by a trailing backslash.
-
-    >>> list(join_continuation(['foo \\', 'bar', 'baz']))
-    ['foobar', 'baz']
-    >>> list(join_continuation(['foo \\', 'bar', 'baz']))
-    ['foobar', 'baz']
-    >>> list(join_continuation(['foo \\', 'bar \\', 'baz']))
-    ['foobarbaz']
-
-    Not sure why, but...
-    The character preceding the backslash is also elided.
-
-    >>> list(join_continuation(['goo\\', 'dly']))
-    ['godly']
-
-    A terrible idea, but...
-    If no line is available to continue, suppress the lines.
-
-    >>> list(join_continuation(['foo', 'bar\\', 'baz\\']))
-    ['foo']
-    """
-    lines = iter(lines)
-    for item in lines:
-        while item.endswith('\\'):
-            try:
-                item = item[:-2].strip() + next(lines)
-            except StopIteration:
-                return
-        yield item
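Editor's note: a usage sketch matching the doctests above, assuming the standalone jaraco.text distribution on PyPI:

    from jaraco.text import FoldedCase, WordSet

    print(FoldedCase('Hello World') == 'hello world')          # True
    print(WordSet.parse('myABCClass').underscore_separated())  # my_ABC_Class
    print(WordSet.parse('a-command').camel_case())             # ACommand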
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/text/__pycache__/__init__.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/text/__pycache__/__init__.cpython-310.pyc
deleted file mode 100644
index 6b0b109..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/jaraco/text/__pycache__/__init__.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/more_itertools/__init__.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/more_itertools/__init__.py
deleted file mode 100644
index ea38bef..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/more_itertools/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from .more import *  # noqa
-from .recipes import *  # noqa
-
-__version__ = '8.12.0'
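Editor's note: the star-imports above mean consumers address everything at the package top level, for example (assuming the more-itertools distribution from PyPI rather than this vendored copy):

    import more_itertools

    print(more_itertools.__version__)
    print(list(more_itertools.chunked(range(7), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]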
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/more_itertools/__pycache__/__init__.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/more_itertools/__pycache__/__init__.cpython-310.pyc
deleted file mode 100644
index 03fbb7b..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/more_itertools/__pycache__/__init__.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/more_itertools/__pycache__/more.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/more_itertools/__pycache__/more.cpython-310.pyc
deleted file mode 100644
index 5000bbc..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/more_itertools/__pycache__/more.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/more_itertools/__pycache__/recipes.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/more_itertools/__pycache__/recipes.cpython-310.pyc
deleted file mode 100644
index 08139cd..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/more_itertools/__pycache__/recipes.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/more_itertools/more.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/more_itertools/more.py
deleted file mode 100644
index 6b6a5ca..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/more_itertools/more.py
+++ /dev/null
@@ -1,4316 +0,0 @@
-import warnings
-
-from collections import Counter, defaultdict, deque, abc
-from collections.abc import Sequence
-from functools import partial, reduce, wraps
-from heapq import merge, heapify, heapreplace, heappop
-from itertools import (
-    chain,
-    compress,
-    count,
-    cycle,
-    dropwhile,
-    groupby,
-    islice,
-    repeat,
-    starmap,
-    takewhile,
-    tee,
-    zip_longest,
-)
-from math import exp, factorial, floor, log
-from queue import Empty, Queue
-from random import random, randrange, uniform
-from operator import itemgetter, mul, sub, gt, lt, ge, le
-from sys import hexversion, maxsize
-from time import monotonic
-
-from .recipes import (
-    consume,
-    flatten,
-    pairwise,
-    powerset,
-    take,
-    unique_everseen,
-)
-
-__all__ = [
-    'AbortThread',
-    'SequenceView',
-    'UnequalIterablesError',
-    'adjacent',
-    'all_unique',
-    'always_iterable',
-    'always_reversible',
-    'bucket',
-    'callback_iter',
-    'chunked',
-    'chunked_even',
-    'circular_shifts',
-    'collapse',
-    'collate',
-    'combination_index',
-    'consecutive_groups',
-    'consumer',
-    'count_cycle',
-    'countable',
-    'difference',
-    'distinct_combinations',
-    'distinct_permutations',
-    'distribute',
-    'divide',
-    'duplicates_everseen',
-    'duplicates_justseen',
-    'exactly_n',
-    'filter_except',
-    'first',
-    'groupby_transform',
-    'ichunked',
-    'ilen',
-    'interleave',
-    'interleave_evenly',
-    'interleave_longest',
-    'intersperse',
-    'is_sorted',
-    'islice_extended',
-    'iterate',
-    'last',
-    'locate',
-    'lstrip',
-    'make_decorator',
-    'map_except',
-    'map_if',
-    'map_reduce',
-    'mark_ends',
-    'minmax',
-    'nth_or_last',
-    'nth_permutation',
-    'nth_product',
-    'numeric_range',
-    'one',
-    'only',
-    'padded',
-    'partitions',
-    'peekable',
-    'permutation_index',
-    'product_index',
-    'raise_',
-    'repeat_each',
-    'repeat_last',
-    'replace',
-    'rlocate',
-    'rstrip',
-    'run_length',
-    'sample',
-    'seekable',
-    'set_partitions',
-    'side_effect',
-    'sliced',
-    'sort_together',
-    'split_after',
-    'split_at',
-    'split_before',
-    'split_into',
-    'split_when',
-    'spy',
-    'stagger',
-    'strip',
-    'strictly_n',
-    'substrings',
-    'substrings_indexes',
-    'time_limited',
-    'unique_in_window',
-    'unique_to_each',
-    'unzip',
-    'value_chain',
-    'windowed',
-    'windowed_complete',
-    'with_iter',
-    'zip_broadcast',
-    'zip_equal',
-    'zip_offset',
-]
-
-
-_marker = object()
-
-
-def chunked(iterable, n, strict=False):
-    """Break *iterable* into lists of length *n*:
-
-        >>> list(chunked([1, 2, 3, 4, 5, 6], 3))
-        [[1, 2, 3], [4, 5, 6]]
-
-    By default, the last yielded list will have fewer than *n* elements
-    if the length of *iterable* is not divisible by *n*:
-
-        >>> list(chunked([1, 2, 3, 4, 5, 6, 7, 8], 3))
-        [[1, 2, 3], [4, 5, 6], [7, 8]]
-
-    To use a fill-in value instead, see the :func:`grouper` recipe.
-
-    If the length of *iterable* is not divisible by *n* and *strict* is
-    ``True``, then ``ValueError`` will be raised before the last
-    list is yielded.
-
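-    For example, a short sketch of the strict behavior described above::
-
-        >>> list(chunked([1, 2, 3, 4], 3, strict=True))  # doctest: +IGNORE_EXCEPTION_DETAIL
-        Traceback (most recent call last):
-        ...
-        ValueError: iterable is not divisible by n.
-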
-    """
-    iterator = iter(partial(take, n, iter(iterable)), [])
-    if strict:
-        if n is None:
-            raise ValueError('n must not be None when using strict mode.')
-
-        def ret():
-            for chunk in iterator:
-                if len(chunk) != n:
-                    raise ValueError('iterable is not divisible by n.')
-                yield chunk
-
-        return iter(ret())
-    else:
-        return iterator
-
-
-def first(iterable, default=_marker):
-    """Return the first item of *iterable*, or *default* if *iterable* is
-    empty.
-
-        >>> first([0, 1, 2, 3])
-        0
-        >>> first([], 'some default')
-        'some default'
-
-    If *default* is not provided and there are no items in the iterable,
-    raise ``ValueError``.
-
-    :func:`first` is useful when you have a generator of expensive-to-retrieve
-    values and want any arbitrary one. It is marginally shorter than
-    ``next(iter(iterable), default)``.
-
-    """
-    try:
-        return next(iter(iterable))
-    except StopIteration as e:
-        if default is _marker:
-            raise ValueError(
-                'first() was called on an empty iterable, and no '
-                'default value was provided.'
-            ) from e
-        return default
-
-
-def last(iterable, default=_marker):
-    """Return the last item of *iterable*, or *default* if *iterable* is
-    empty.
-
-        >>> last([0, 1, 2, 3])
-        3
-        >>> last([], 'some default')
-        'some default'
-
-    If *default* is not provided and there are no items in the iterable,
-    raise ``ValueError``.
-    """
-    try:
-        if isinstance(iterable, Sequence):
-            return iterable[-1]
-        # Work around https://bugs.python.org/issue38525
-        elif hasattr(iterable, '__reversed__') and (hexversion != 0x030800F0):
-            return next(reversed(iterable))
-        else:
-            return deque(iterable, maxlen=1)[-1]
-    except (IndexError, TypeError, StopIteration):
-        if default is _marker:
-            raise ValueError(
-                'last() was called on an empty iterable, and no default was '
-                'provided.'
-            )
-        return default
-
-
-def nth_or_last(iterable, n, default=_marker):
-    """Return the nth or the last item of *iterable*,
-    or *default* if *iterable* is empty.
-
-        >>> nth_or_last([0, 1, 2, 3], 2)
-        2
-        >>> nth_or_last([0, 1], 2)
-        1
-        >>> nth_or_last([], 0, 'some default')
-        'some default'
-
-    If *default* is not provided and there are no items in the iterable,
-    raise ``ValueError``.
-    """
-    return last(islice(iterable, n + 1), default=default)
-
-
-class peekable:
-    """Wrap an iterator to allow lookahead and prepending elements.
-
-    Call :meth:`peek` on the result to get the value that will be returned
-    by :func:`next`. This won't advance the iterator:
-
-        >>> p = peekable(['a', 'b'])
-        >>> p.peek()
-        'a'
-        >>> next(p)
-        'a'
-
-    Pass :meth:`peek` a default value to return that instead of raising
-    ``StopIteration`` when the iterator is exhausted.
-
-        >>> p = peekable([])
-        >>> p.peek('hi')
-        'hi'
-
-    peekables also offer a :meth:`prepend` method, which "inserts" items
-    at the head of the iterable:
-
-        >>> p = peekable([1, 2, 3])
-        >>> p.prepend(10, 11, 12)
-        >>> next(p)
-        10
-        >>> p.peek()
-        11
-        >>> list(p)
-        [11, 12, 1, 2, 3]
-
-    peekables can be indexed. Index 0 is the item that will be returned by
-    :func:`next`, index 1 is the item after that, and so on. The values up
-    to the given index will be cached:
-
-        >>> p = peekable(['a', 'b', 'c', 'd'])
-        >>> p[0]
-        'a'
-        >>> p[1]
-        'b'
-        >>> next(p)
-        'a'
-
-    Negative indexes are supported, but be aware that they will cache the
-    remaining items in the source iterator, which may require significant
-    storage.
-
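-    For instance (note that the whole source is cached once a negative
-    index is used)::
-
-        >>> p = peekable(['a', 'b', 'c'])
-        >>> p[-1]
-        'c'
-        >>> next(p)
-        'a'
-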
-    To check whether a peekable is exhausted, check its truth value:
-
-        >>> p = peekable(['a', 'b'])
-        >>> if p:  # peekable has items
-        ...     list(p)
-        ['a', 'b']
-        >>> if not p:  # peekable is exhausted
-        ...     list(p)
-        []
-
-    """
-
-    def __init__(self, iterable):
-        self._it = iter(iterable)
-        self._cache = deque()
-
-    def __iter__(self):
-        return self
-
-    def __bool__(self):
-        try:
-            self.peek()
-        except StopIteration:
-            return False
-        return True
-
-    def peek(self, default=_marker):
-        """Return the item that will be next returned from ``next()``.
-
-        Return ``default`` if there are no items left. If ``default`` is not
-        provided, raise ``StopIteration``.
-
-        """
-        if not self._cache:
-            try:
-                self._cache.append(next(self._it))
-            except StopIteration:
-                if default is _marker:
-                    raise
-                return default
-        return self._cache[0]
-
-    def prepend(self, *items):
-        """Stack up items to be the next ones returned from ``next()`` or
-        ``self.peek()``. The items will be returned in
-        first in, first out order::
-
-            >>> p = peekable([1, 2, 3])
-            >>> p.prepend(10, 11, 12)
-            >>> next(p)
-            10
-            >>> list(p)
-            [11, 12, 1, 2, 3]
-
-        It is possible, by prepending items, to "resurrect" a peekable that
-        previously raised ``StopIteration``.
-
-            >>> p = peekable([])
-            >>> next(p)
-            Traceback (most recent call last):
-              ...
-            StopIteration
-            >>> p.prepend(1)
-            >>> next(p)
-            1
-            >>> next(p)
-            Traceback (most recent call last):
-              ...
-            StopIteration
-
-        """
-        self._cache.extendleft(reversed(items))
-
-    def __next__(self):
-        if self._cache:
-            return self._cache.popleft()
-
-        return next(self._it)
-
-    def _get_slice(self, index):
-        # Normalize the slice's arguments
-        step = 1 if (index.step is None) else index.step
-        if step > 0:
-            start = 0 if (index.start is None) else index.start
-            stop = maxsize if (index.stop is None) else index.stop
-        elif step < 0:
-            start = -1 if (index.start is None) else index.start
-            stop = (-maxsize - 1) if (index.stop is None) else index.stop
-        else:
-            raise ValueError('slice step cannot be zero')
-
-        # If either the start or stop index is negative, we'll need to cache
-        # the rest of the iterable in order to slice from the right side.
-        if (start < 0) or (stop < 0):
-            self._cache.extend(self._it)
-        # Otherwise we'll need to find the rightmost index and cache to that
-        # point.
-        else:
-            n = min(max(start, stop) + 1, maxsize)
-            cache_len = len(self._cache)
-            if n >= cache_len:
-                self._cache.extend(islice(self._it, n - cache_len))
-
-        return list(self._cache)[index]
-
-    def __getitem__(self, index):
-        if isinstance(index, slice):
-            return self._get_slice(index)
-
-        cache_len = len(self._cache)
-        if index < 0:
-            self._cache.extend(self._it)
-        elif index >= cache_len:
-            self._cache.extend(islice(self._it, index + 1 - cache_len))
-
-        return self._cache[index]
-
-
-def collate(*iterables, **kwargs):
-    """Return a sorted merge of the items from each of several already-sorted
-    *iterables*.
-
-        >>> list(collate('ACDZ', 'AZ', 'JKL'))
-        ['A', 'A', 'C', 'D', 'J', 'K', 'L', 'Z', 'Z']
-
-    Works lazily, keeping only the next value from each iterable in memory. Use
-    :func:`collate` to, for example, perform an n-way mergesort of items that
-    don't fit in memory.
-
-    If a *key* function is specified, the iterables will be sorted according
-    to its result:
-
-        >>> key = lambda s: int(s)  # Sort by numeric value, not by string
-        >>> list(collate(['1', '10'], ['2', '11'], key=key))
-        ['1', '2', '10', '11']
-
-    If the *iterables* are sorted in descending order, set *reverse* to
-    ``True``:
-
-        >>> list(collate([5, 3, 1], [4, 2, 0], reverse=True))
-        [5, 4, 3, 2, 1, 0]
-
-    If the elements of the passed-in iterables are out of order, you might get
-    unexpected results.
-
-    On Python 3.5+, this function is an alias for :func:`heapq.merge`.
-
-    """
-    warnings.warn(
-        "collate is no longer part of more_itertools, use heapq.merge",
-        DeprecationWarning,
-    )
-    return merge(*iterables, **kwargs)
-
-
-def consumer(func):
-    """Decorator that automatically advances a PEP-342-style "reverse iterator"
-    to its first yield point so you don't have to call ``next()`` on it
-    manually.
-
-        >>> @consumer
-        ... def tally():
-        ...     i = 0
-        ...     while True:
-        ...         print('Thing number %s is %s.' % (i, (yield)))
-        ...         i += 1
-        ...
-        >>> t = tally()
-        >>> t.send('red')
-        Thing number 0 is red.
-        >>> t.send('fish')
-        Thing number 1 is fish.
-
-    Without the decorator, you would have to call ``next(t)`` before
-    ``t.send()`` could be used.
-
-    """
-
-    @wraps(func)
-    def wrapper(*args, **kwargs):
-        gen = func(*args, **kwargs)
-        next(gen)
-        return gen
-
-    return wrapper
-
-
-def ilen(iterable):
-    """Return the number of items in *iterable*.
-
-        >>> ilen(x for x in range(1000000) if x % 3 == 0)
-        333334
-
-    This consumes the iterable, so handle with care.
-
-    """
-    # This approach was selected because benchmarks showed it's likely the
-    # fastest of the known implementations at the time of writing.
-    # See GitHub tracker: #236, #230.
-    counter = count()
-    deque(zip(iterable, counter), maxlen=0)
-    return next(counter)
-
-
-def iterate(func, start):
-    """Return ``start``, ``func(start)``, ``func(func(start))``, ...
-
-    >>> from itertools import islice
-    >>> list(islice(iterate(lambda x: 2*x, 1), 10))
-    [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
-
-    """
-    while True:
-        yield start
-        start = func(start)
-
-
-def with_iter(context_manager):
-    """Wrap an iterable in a ``with`` statement, so it closes once exhausted.
-
-    For example, this will close the file when the iterator is exhausted::
-
-        upper_lines = (line.upper() for line in with_iter(open('foo')))
-
-    Any context manager which returns an iterable is a candidate for
-    ``with_iter``.
-
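-    For instance, with an in-memory stream (a brief sketch using
-    :class:`io.StringIO`)::
-
-        >>> from io import StringIO
-        >>> f = StringIO('one line')
-        >>> list(with_iter(f))
-        ['one line']
-        >>> f.closed
-        True
-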
-    """
-    with context_manager as iterable:
-        yield from iterable
-
-
-def one(iterable, too_short=None, too_long=None):
-    """Return the first item from *iterable*, which is expected to contain only
-    that item. Raise an exception if *iterable* is empty or has more than one
-    item.
-
-    :func:`one` is useful for ensuring that an iterable contains only one item.
-    For example, it can be used to retrieve the result of a database query
-    that is expected to return a single row.
-
-    If *iterable* is empty, ``ValueError`` will be raised. You may specify a
-    different exception with the *too_short* keyword:
-
-        >>> it = []
-        >>> one(it)  # doctest: +IGNORE_EXCEPTION_DETAIL
-        Traceback (most recent call last):
-        ...
-        ValueError: too few items in iterable (expected 1)
-        >>> too_short = IndexError('too few items')
-        >>> one(it, too_short=too_short)  # doctest: +IGNORE_EXCEPTION_DETAIL
-        Traceback (most recent call last):
-        ...
-        IndexError: too few items
-
-    Similarly, if *iterable* contains more than one item, ``ValueError`` will
-    be raised. You may specify a different exception with the *too_long*
-    keyword:
-
-        >>> it = ['too', 'many']
-        >>> one(it)  # doctest: +IGNORE_EXCEPTION_DETAIL
-        Traceback (most recent call last):
-        ...
-        ValueError: Expected exactly one item in iterable, but got 'too',
-        'many', and perhaps more.
-        >>> too_long = RuntimeError
-        >>> one(it, too_long=too_long)  # doctest: +IGNORE_EXCEPTION_DETAIL
-        Traceback (most recent call last):
-        ...
-        RuntimeError
-
-    Note that :func:`one` attempts to advance *iterable* twice to ensure there
-    is only one item. See :func:`spy` or :func:`peekable` to check iterable
-    contents less destructively.
-
-    """
-    it = iter(iterable)
-
-    try:
-        first_value = next(it)
-    except StopIteration as e:
-        raise (
-            too_short or ValueError('too few items in iterable (expected 1)')
-        ) from e
-
-    try:
-        second_value = next(it)
-    except StopIteration:
-        pass
-    else:
-        msg = (
-            'Expected exactly one item in iterable, but got {!r}, {!r}, '
-            'and perhaps more.'.format(first_value, second_value)
-        )
-        raise too_long or ValueError(msg)
-
-    return first_value
-
-
-def raise_(exception, *args):
-    raise exception(*args)
-
-
-def strictly_n(iterable, n, too_short=None, too_long=None):
-    """Validate that *iterable* has exactly *n* items and return them if
-    it does. If it has fewer than *n* items, call function *too_short*
-    with the number of items found. If it has more than *n* items, call
-    function *too_long* with ``n + 1``.
-
-        >>> iterable = ['a', 'b', 'c', 'd']
-        >>> n = 4
-        >>> list(strictly_n(iterable, n))
-        ['a', 'b', 'c', 'd']
-
-    By default, *too_short* and *too_long* are functions that raise
-    ``ValueError``.
-
-        >>> list(strictly_n('ab', 3))  # doctest: +IGNORE_EXCEPTION_DETAIL
-        Traceback (most recent call last):
-        ...
-        ValueError: too few items in iterable (got 2)
-
-        >>> list(strictly_n('abc', 2))  # doctest: +IGNORE_EXCEPTION_DETAIL
-        Traceback (most recent call last):
-        ...
-        ValueError: too many items in iterable (got at least 3)
-
-    You can instead supply functions that do something else.
-    *too_short* will be called with the number of items in *iterable*.
-    *too_long* will be called with `n + 1`.
-
-        >>> def too_short(item_count):
-        ...     raise RuntimeError
-        >>> it = strictly_n('abcd', 6, too_short=too_short)
-        >>> list(it)  # doctest: +IGNORE_EXCEPTION_DETAIL
-        Traceback (most recent call last):
-        ...
-        RuntimeError
-
-        >>> def too_long(item_count):
-        ...     print('The boss is going to hear about this')
-        >>> it = strictly_n('abcdef', 4, too_long=too_long)
-        >>> list(it)
-        The boss is going to hear about this
-        ['a', 'b', 'c', 'd']
-
-    """
-    if too_short is None:
-        too_short = lambda item_count: raise_(
-            ValueError,
-            'Too few items in iterable (got {})'.format(item_count),
-        )
-
-    if too_long is None:
-        too_long = lambda item_count: raise_(
-            ValueError,
-            'Too many items in iterable (got at least {})'.format(item_count),
-        )
-
-    it = iter(iterable)
-    for i in range(n):
-        try:
-            item = next(it)
-        except StopIteration:
-            too_short(i)
-            return
-        else:
-            yield item
-
-    try:
-        next(it)
-    except StopIteration:
-        pass
-    else:
-        too_long(n + 1)
-
-
-def distinct_permutations(iterable, r=None):
-    """Yield successive distinct permutations of the elements in *iterable*.
-
-        >>> sorted(distinct_permutations([1, 0, 1]))
-        [(0, 1, 1), (1, 0, 1), (1, 1, 0)]
-
-    Equivalent to ``set(permutations(iterable))``, except duplicates are not
-    generated and thrown away. For larger input sequences this is much more
-    efficient.
-
-    Duplicate permutations arise when there are duplicated elements in the
-    input iterable. The number of items returned is
-    `n! / (x_1! * x_2! * ... * x_k!)`, where `n` is the total number of
-    input items and each `x_i` is the count of one of the `k` distinct
-    items in the input sequence.
-
-    If *r* is given, only the *r*-length permutations are yielded.
-
-        >>> sorted(distinct_permutations([1, 0, 1], r=2))
-        [(0, 1), (1, 0), (1, 1)]
-        >>> sorted(distinct_permutations(range(3), r=2))
-        [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]
-
-    """
-    # Algorithm: https://w.wiki/Qai
-    def _full(A):
-        while True:
-            # Yield the permutation we have
-            yield tuple(A)
-
-            # Find the largest index i such that A[i] < A[i + 1]
-            for i in range(size - 2, -1, -1):
-                if A[i] < A[i + 1]:
-                    break
-            # If no such index exists, this permutation is the last one
-            else:
-                return
-
-            # Find the largest index j greater than i such that A[i] < A[j]
-            for j in range(size - 1, i, -1):
-                if A[i] < A[j]:
-                    break
-
-            # Swap the value of A[i] with that of A[j], then reverse the
-            # sequence from A[i + 1] to form the new permutation
-            A[i], A[j] = A[j], A[i]
-            A[i + 1 :] = A[: i - size : -1]  # A[i + 1:][::-1]
-
-    # Algorithm: modified from the above
-    def _partial(A, r):
-        # Split A into the first r items and the last r items
-        head, tail = A[:r], A[r:]
-        right_head_indexes = range(r - 1, -1, -1)
-        left_tail_indexes = range(len(tail))
-
-        while True:
-            # Yield the permutation we have
-            yield tuple(head)
-
-            # Starting from the right, find the first index of the head with
-            # value smaller than the maximum value of the tail - call it i.
-            pivot = tail[-1]
-            for i in right_head_indexes:
-                if head[i] < pivot:
-                    break
-                pivot = head[i]
-            else:
-                return
-
-            # Starting from the left, find the first value of the tail
-            # with a value greater than head[i] and swap.
-            for j in left_tail_indexes:
-                if tail[j] > head[i]:
-                    head[i], tail[j] = tail[j], head[i]
-                    break
-            # If we didn't find one, start from the right and find the first
-            # index of the head with a value greater than head[i] and swap.
-            else:
-                for j in right_head_indexes:
-                    if head[j] > head[i]:
-                        head[i], head[j] = head[j], head[i]
-                        break
-
-            # Reverse head[i + 1:] and swap it with tail[:r - (i + 1)]
-            tail += head[: i - r : -1]  # head[i + 1:][::-1]
-            i += 1
-            head[i:], tail[:] = tail[: r - i], tail[r - i :]
-
-    items = sorted(iterable)
-
-    size = len(items)
-    if r is None:
-        r = size
-
-    if 0 < r <= size:
-        return _full(items) if (r == size) else _partial(items, r)
-
-    return iter(() if r else ((),))
-
-
-def intersperse(e, iterable, n=1):
-    """Intersperse filler element *e* among the items in *iterable*, leaving
-    *n* items between each filler element.
-
-        >>> list(intersperse('!', [1, 2, 3, 4, 5]))
-        [1, '!', 2, '!', 3, '!', 4, '!', 5]
-
-        >>> list(intersperse(None, [1, 2, 3, 4, 5], n=2))
-        [1, 2, None, 3, 4, None, 5]
-
-    """
-    if n == 0:
-        raise ValueError('n must be > 0')
-    elif n == 1:
-        # interleave(repeat(e), iterable) -> e, x_0, e, x_1, e, x_2...
-        # islice(..., 1, None) -> x_0, e, x_1, e, x_2...
-        return islice(interleave(repeat(e), iterable), 1, None)
-    else:
-        # interleave(filler, chunks) -> [e], [x_0, x_1], [e], [x_2, x_3]...
-        # islice(..., 1, None) -> [x_0, x_1], [e], [x_2, x_3]...
-        # flatten(...) -> x_0, x_1, e, x_2, x_3...
-        filler = repeat([e])
-        chunks = chunked(iterable, n)
-        return flatten(islice(interleave(filler, chunks), 1, None))
-
-
-def unique_to_each(*iterables):
-    """Return the elements from each of the input iterables that aren't in the
-    other input iterables.
-
-    For example, suppose you have a set of packages, each with a set of
-    dependencies::
-
-        {'pkg_1': {'A', 'B'}, 'pkg_2': {'B', 'C'}, 'pkg_3': {'B', 'D'}}
-
-    If you remove one package, which dependencies can also be removed?
-
-    If ``pkg_1`` is removed, then ``A`` is no longer necessary - it is not
-    associated with ``pkg_2`` or ``pkg_3``. Similarly, ``C`` is only needed for
-    ``pkg_2``, and ``D`` is only needed for ``pkg_3``::
-
-        >>> unique_to_each({'A', 'B'}, {'B', 'C'}, {'B', 'D'})
-        [['A'], ['C'], ['D']]
-
-    If there are duplicates in one input iterable that aren't in the others,
-    they will be duplicated in the output. Input order is preserved::
-
-        >>> unique_to_each("mississippi", "missouri")
-        [['p', 'p'], ['o', 'u', 'r']]
-
-    It is assumed that the elements of each iterable are hashable.
-
-    """
-    pool = [list(it) for it in iterables]
-    counts = Counter(chain.from_iterable(map(set, pool)))
-    uniques = {element for element in counts if counts[element] == 1}
-    return [list(filter(uniques.__contains__, it)) for it in pool]
-
-
-def windowed(seq, n, fillvalue=None, step=1):
-    """Return a sliding window of width *n* over the given iterable.
-
-        >>> all_windows = windowed([1, 2, 3, 4, 5], 3)
-        >>> list(all_windows)
-        [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
-
-    When the window is larger than the iterable, *fillvalue* is used in place
-    of missing values:
-
-        >>> list(windowed([1, 2, 3], 4))
-        [(1, 2, 3, None)]
-
-    Each window will advance in increments of *step*:
-
-        >>> list(windowed([1, 2, 3, 4, 5, 6], 3, fillvalue='!', step=2))
-        [(1, 2, 3), (3, 4, 5), (5, 6, '!')]
-
-    To slide into the iterable's items, use :func:`chain` to add filler items
-    to the left:
-
-        >>> iterable = [1, 2, 3, 4]
-        >>> n = 3
-        >>> padding = [None] * (n - 1)
-        >>> list(windowed(chain(padding, iterable), 3))
-        [(None, None, 1), (None, 1, 2), (1, 2, 3), (2, 3, 4)]
-    """
-    if n < 0:
-        raise ValueError('n must be >= 0')
-    if n == 0:
-        yield tuple()
-        return
-    if step < 1:
-        raise ValueError('step must be >= 1')
-
-    window = deque(maxlen=n)
-    i = n
-    for _ in map(window.append, seq):
-        i -= 1
-        if not i:
-            i = step
-            yield tuple(window)
-
-    size = len(window)
-    if size < n:
-        yield tuple(chain(window, repeat(fillvalue, n - size)))
-    elif 0 < i < min(step, n):
-        window += (fillvalue,) * i
-        yield tuple(window)
-
-
-def substrings(iterable):
-    """Yield all of the substrings of *iterable*.
-
-        >>> [''.join(s) for s in substrings('more')]
-        ['m', 'o', 'r', 'e', 'mo', 'or', 're', 'mor', 'ore', 'more']
-
-    Note that non-string iterables can also be subdivided.
-
-        >>> list(substrings([0, 1, 2]))
-        [(0,), (1,), (2,), (0, 1), (1, 2), (0, 1, 2)]
-
-    """
-    # The length-1 substrings
-    seq = []
-    for item in iter(iterable):
-        seq.append(item)
-        yield (item,)
-    seq = tuple(seq)
-    item_count = len(seq)
-
-    # And the rest
-    for n in range(2, item_count + 1):
-        for i in range(item_count - n + 1):
-            yield seq[i : i + n]
-
-
-def substrings_indexes(seq, reverse=False):
-    """Yield all substrings and their positions in *seq*
-
-    The items yielded will be a tuple of the form ``(substr, i, j)``, where
-    ``substr == seq[i:j]``.
-
-    This function only works for iterables that support slicing, such as
-    ``str`` objects.
-
-    >>> for item in substrings_indexes('more'):
-    ...    print(item)
-    ('m', 0, 1)
-    ('o', 1, 2)
-    ('r', 2, 3)
-    ('e', 3, 4)
-    ('mo', 0, 2)
-    ('or', 1, 3)
-    ('re', 2, 4)
-    ('mor', 0, 3)
-    ('ore', 1, 4)
-    ('more', 0, 4)
-
-    Set *reverse* to ``True`` to yield the same items in the opposite order.
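-
-    For instance (a quick sketch of the reversed order)::
-
-        >>> next(substrings_indexes('more', reverse=True))
-        ('more', 0, 4)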
-
-    """
-    r = range(1, len(seq) + 1)
-    if reverse:
-        r = reversed(r)
-    return (
-        (seq[i : i + L], i, i + L) for L in r for i in range(len(seq) - L + 1)
-    )
-
-
-class bucket:
-    """Wrap *iterable* and return an object that buckets it iterable into
-    child iterables based on a *key* function.
-
-        >>> iterable = ['a1', 'b1', 'c1', 'a2', 'b2', 'c2', 'b3']
-        >>> s = bucket(iterable, key=lambda x: x[0])  # Bucket by 1st character
-        >>> sorted(list(s))  # Get the keys
-        ['a', 'b', 'c']
-        >>> a_iterable = s['a']
-        >>> next(a_iterable)
-        'a1'
-        >>> next(a_iterable)
-        'a2'
-        >>> list(s['b'])
-        ['b1', 'b2', 'b3']
-
-    The original iterable will be advanced and its items will be cached until
-    they are used by the child iterables. This may require significant storage.
-
-    By default, attempting to select a bucket to which no items belong will
-    exhaust the iterable and cache all values.
-    If you specify a *validator* function, selected buckets will instead be
-    checked against it.
-
-        >>> from itertools import count
-        >>> it = count(1, 2)  # Infinite sequence of odd numbers
-        >>> key = lambda x: x % 10  # Bucket by last digit
-        >>> validator = lambda x: x in {1, 3, 5, 7, 9}  # Odd digits only
-        >>> s = bucket(it, key=key, validator=validator)
-        >>> 2 in s
-        False
-        >>> list(s[2])
-        []
-
-    """
-
-    def __init__(self, iterable, key, validator=None):
-        self._it = iter(iterable)
-        self._key = key
-        self._cache = defaultdict(deque)
-        self._validator = validator or (lambda x: True)
-
-    def __contains__(self, value):
-        if not self._validator(value):
-            return False
-
-        try:
-            item = next(self[value])
-        except StopIteration:
-            return False
-        else:
-            self._cache[value].appendleft(item)
-
-        return True
-
-    def _get_values(self, value):
-        """
-        Helper to yield items from the parent iterator that match *value*.
-        Items that don't match are stored in the local cache as they
-        are encountered.
-        """
-        while True:
-            # If we've cached some items that match the target value, emit
-            # the first one and evict it from the cache.
-            if self._cache[value]:
-                yield self._cache[value].popleft()
-            # Otherwise we need to advance the parent iterator to search for
-            # a matching item, caching the rest.
-            else:
-                while True:
-                    try:
-                        item = next(self._it)
-                    except StopIteration:
-                        return
-                    item_value = self._key(item)
-                    if item_value == value:
-                        yield item
-                        break
-                    elif self._validator(item_value):
-                        self._cache[item_value].append(item)
-
-    def __iter__(self):
-        for item in self._it:
-            item_value = self._key(item)
-            if self._validator(item_value):
-                self._cache[item_value].append(item)
-
-        yield from self._cache.keys()
-
-    def __getitem__(self, value):
-        if not self._validator(value):
-            return iter(())
-
-        return self._get_values(value)
-
-
-def spy(iterable, n=1):
-    """Return a 2-tuple with a list containing the first *n* elements of
-    *iterable*, and an iterator with the same items as *iterable*.
-    This allows you to "look ahead" at the items in the iterable without
-    advancing it.
-
-    There is one item in the list by default:
-
-        >>> iterable = 'abcdefg'
-        >>> head, iterable = spy(iterable)
-        >>> head
-        ['a']
-        >>> list(iterable)
-        ['a', 'b', 'c', 'd', 'e', 'f', 'g']
-
-    You may use unpacking to retrieve items instead of lists:
-
-        >>> (head,), iterable = spy('abcdefg')
-        >>> head
-        'a'
-        >>> (first, second), iterable = spy('abcdefg', 2)
-        >>> first
-        'a'
-        >>> second
-        'b'
-
-    The number of items requested can be larger than the number of items in
-    the iterable:
-
-        >>> iterable = [1, 2, 3, 4, 5]
-        >>> head, iterable = spy(iterable, 10)
-        >>> head
-        [1, 2, 3, 4, 5]
-        >>> list(iterable)
-        [1, 2, 3, 4, 5]
-
-    """
-    it = iter(iterable)
-    head = take(n, it)
-
-    return head.copy(), chain(head, it)
-
-
-def interleave(*iterables):
-    """Return a new iterable yielding from each iterable in turn,
-    until the shortest is exhausted.
-
-        >>> list(interleave([1, 2, 3], [4, 5], [6, 7, 8]))
-        [1, 4, 6, 2, 5, 7]
-
-    For a version that doesn't terminate after the shortest iterable is
-    exhausted, see :func:`interleave_longest`.
-
-    """
-    return chain.from_iterable(zip(*iterables))
-
-
-def interleave_longest(*iterables):
-    """Return a new iterable yielding from each iterable in turn,
-    skipping any that are exhausted.
-
-        >>> list(interleave_longest([1, 2, 3], [4, 5], [6, 7, 8]))
-        [1, 4, 6, 2, 5, 7, 3, 8]
-
-    This function produces the same output as :func:`roundrobin`, but may
-    perform better for some inputs (in particular when the number of iterables
-    is large).
-
-    """
-    i = chain.from_iterable(zip_longest(*iterables, fillvalue=_marker))
-    return (x for x in i if x is not _marker)
-
-
-def interleave_evenly(iterables, lengths=None):
-    """
-    Interleave multiple iterables so that their elements are evenly distributed
-    throughout the output sequence.
-
-    >>> iterables = [1, 2, 3, 4, 5], ['a', 'b']
-    >>> list(interleave_evenly(iterables))
-    [1, 2, 'a', 3, 4, 'b', 5]
-
-    >>> iterables = [[1, 2, 3], [4, 5], [6, 7, 8]]
-    >>> list(interleave_evenly(iterables))
-    [1, 6, 4, 2, 7, 3, 8, 5]
-
-    This function requires iterables of known length. Iterables without
-    ``__len__()`` can be used by manually specifying lengths with *lengths*:
-
-    >>> from itertools import combinations, repeat
-    >>> iterables = [combinations(range(4), 2), ['a', 'b', 'c']]
-    >>> lengths = [4 * (4 - 1) // 2, 3]
-    >>> list(interleave_evenly(iterables, lengths=lengths))
-    [(0, 1), (0, 2), 'a', (0, 3), (1, 2), 'b', (1, 3), (2, 3), 'c']
-
-    Based on Bresenham's algorithm.
-    """
-    if lengths is None:
-        try:
-            lengths = [len(it) for it in iterables]
-        except TypeError:
-            raise ValueError(
-                'Iterable lengths could not be determined automatically. '
-                'Specify them with the lengths keyword.'
-            )
-    elif len(iterables) != len(lengths):
-        raise ValueError('Mismatching number of iterables and lengths.')
-
-    dims = len(lengths)
-
-    # sort iterables by length, descending
-    lengths_permute = sorted(
-        range(dims), key=lambda i: lengths[i], reverse=True
-    )
-    lengths_desc = [lengths[i] for i in lengths_permute]
-    iters_desc = [iter(iterables[i]) for i in lengths_permute]
-
-    # the longest iterable is the primary one (Bresenham: the longest
-    # distance along an axis)
-    delta_primary, deltas_secondary = lengths_desc[0], lengths_desc[1:]
-    iter_primary, iters_secondary = iters_desc[0], iters_desc[1:]
-    errors = [delta_primary // dims] * len(deltas_secondary)
-
-    to_yield = sum(lengths)
-    while to_yield:
-        yield next(iter_primary)
-        to_yield -= 1
-        # update errors for each secondary iterable
-        errors = [e - delta for e, delta in zip(errors, deltas_secondary)]
-
-        # those iterables for which the error is negative are yielded
-        # ("diagonal step" in Bresenham)
-        for i, e in enumerate(errors):
-            if e < 0:
-                yield next(iters_secondary[i])
-                to_yield -= 1
-                errors[i] += delta_primary
-
-
-def collapse(iterable, base_type=None, levels=None):
-    """Flatten an iterable with multiple levels of nesting (e.g., a list of
-    lists of tuples) into non-iterable types.
-
-        >>> iterable = [(1, 2), ([3, 4], [[5], [6]])]
-        >>> list(collapse(iterable))
-        [1, 2, 3, 4, 5, 6]
-
-    Binary and text strings are not considered iterable and
-    will not be collapsed.
-
-    To avoid collapsing other types, specify *base_type*:
-
-        >>> iterable = ['ab', ('cd', 'ef'), ['gh', 'ij']]
-        >>> list(collapse(iterable, base_type=tuple))
-        ['ab', ('cd', 'ef'), 'gh', 'ij']
-
-    Specify *levels* to stop flattening after a certain level:
-
-    >>> iterable = [('a', ['b']), ('c', ['d'])]
-    >>> list(collapse(iterable))  # Fully flattened
-    ['a', 'b', 'c', 'd']
-    >>> list(collapse(iterable, levels=1))  # Only one level flattened
-    ['a', ['b'], 'c', ['d']]
-
-    """
-
-    def walk(node, level):
-        if (
-            ((levels is not None) and (level > levels))
-            or isinstance(node, (str, bytes))
-            or ((base_type is not None) and isinstance(node, base_type))
-        ):
-            yield node
-            return
-
-        try:
-            tree = iter(node)
-        except TypeError:
-            yield node
-            return
-        else:
-            for child in tree:
-                yield from walk(child, level + 1)
-
-    yield from walk(iterable, 0)
-
-
-def side_effect(func, iterable, chunk_size=None, before=None, after=None):
-    """Invoke *func* on each item in *iterable* (or on each *chunk_size* group
-    of items) before yielding the item.
-
-    `func` must be a function that takes a single argument. Its return value
-    will be discarded.
-
-    *before* and *after* are optional functions that take no arguments. They
-    will be executed before iteration starts and after it ends, respectively.
-
-    `side_effect` can be used for logging, updating progress bars, or anything
-    that is not functionally "pure."
-
-    Emitting a status message:
-
-        >>> from more_itertools import consume
-        >>> func = lambda item: print('Received {}'.format(item))
-        >>> consume(side_effect(func, range(2)))
-        Received 0
-        Received 1
-
-    Operating on chunks of items:
-
-        >>> pair_sums = []
-        >>> func = lambda chunk: pair_sums.append(sum(chunk))
-        >>> list(side_effect(func, [0, 1, 2, 3, 4, 5], 2))
-        [0, 1, 2, 3, 4, 5]
-        >>> list(pair_sums)
-        [1, 5, 9]
-
-    Writing to a file-like object:
-
-        >>> from io import StringIO
-        >>> from more_itertools import consume
-        >>> f = StringIO()
-        >>> func = lambda x: print(x, file=f)
-        >>> before = lambda: print(u'HEADER', file=f)
-        >>> after = f.close
-        >>> it = [u'a', u'b', u'c']
-        >>> consume(side_effect(func, it, before=before, after=after))
-        >>> f.closed
-        True
-
-    """
-    try:
-        if before is not None:
-            before()
-
-        if chunk_size is None:
-            for item in iterable:
-                func(item)
-                yield item
-        else:
-            for chunk in chunked(iterable, chunk_size):
-                func(chunk)
-                yield from chunk
-    finally:
-        if after is not None:
-            after()
-
-
-def sliced(seq, n, strict=False):
-    """Yield slices of length *n* from the sequence *seq*.
-
-    >>> list(sliced((1, 2, 3, 4, 5, 6), 3))
-    [(1, 2, 3), (4, 5, 6)]
-
-    By default, the last yielded slice will have fewer than *n* elements
-    if the length of *seq* is not divisible by *n*:
-
-    >>> list(sliced((1, 2, 3, 4, 5, 6, 7, 8), 3))
-    [(1, 2, 3), (4, 5, 6), (7, 8)]
-
-    If the length of *seq* is not divisible by *n* and *strict* is
-    ``True``, then ``ValueError`` will be raised before the last
-    slice is yielded.
-
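-    For example::
-
-        >>> list(sliced((1, 2, 3, 4), 3, strict=True))  # doctest: +IGNORE_EXCEPTION_DETAIL
-        Traceback (most recent call last):
-        ...
-        ValueError: seq is not divisible by n.
-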
-    This function will only work for iterables that support slicing.
-    For non-sliceable iterables, see :func:`chunked`.
-
-    """
-    iterator = takewhile(len, (seq[i : i + n] for i in count(0, n)))
-    if strict:
-
-        def ret():
-            for _slice in iterator:
-                if len(_slice) != n:
-                    raise ValueError("seq is not divisible by n.")
-                yield _slice
-
-        return iter(ret())
-    else:
-        return iterator
-
-
-def split_at(iterable, pred, maxsplit=-1, keep_separator=False):
-    """Yield lists of items from *iterable*, where each list is delimited by
-    an item where callable *pred* returns ``True``.
-
-        >>> list(split_at('abcdcba', lambda x: x == 'b'))
-        [['a'], ['c', 'd', 'c'], ['a']]
-
-        >>> list(split_at(range(10), lambda n: n % 2 == 1))
-        [[0], [2], [4], [6], [8], []]
-
-    At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
-    then there is no limit on the number of splits:
-
-        >>> list(split_at(range(10), lambda n: n % 2 == 1, maxsplit=2))
-        [[0], [2], [4, 5, 6, 7, 8, 9]]
-
-    By default, the delimiting items are not included in the output.
-    To include them, set *keep_separator* to ``True``.
-
-        >>> list(split_at('abcdcba', lambda x: x == 'b', keep_separator=True))
-        [['a'], ['b'], ['c', 'd', 'c'], ['b'], ['a']]
-
-    """
-    if maxsplit == 0:
-        yield list(iterable)
-        return
-
-    buf = []
-    it = iter(iterable)
-    for item in it:
-        if pred(item):
-            yield buf
-            if keep_separator:
-                yield [item]
-            if maxsplit == 1:
-                yield list(it)
-                return
-            buf = []
-            maxsplit -= 1
-        else:
-            buf.append(item)
-    yield buf
-
-
-def split_before(iterable, pred, maxsplit=-1):
-    """Yield lists of items from *iterable*, where each list ends just before
-    an item for which callable *pred* returns ``True``:
-
-        >>> list(split_before('OneTwo', lambda s: s.isupper()))
-        [['O', 'n', 'e'], ['T', 'w', 'o']]
-
-        >>> list(split_before(range(10), lambda n: n % 3 == 0))
-        [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
-
-    At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
-    then there is no limit on the number of splits:
-
-        >>> list(split_before(range(10), lambda n: n % 3 == 0, maxsplit=2))
-        [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
-    """
-    if maxsplit == 0:
-        yield list(iterable)
-        return
-
-    buf = []
-    it = iter(iterable)
-    for item in it:
-        if pred(item) and buf:
-            yield buf
-            if maxsplit == 1:
-                yield [item] + list(it)
-                return
-            buf = []
-            maxsplit -= 1
-        buf.append(item)
-    if buf:
-        yield buf
-
-
-def split_after(iterable, pred, maxsplit=-1):
-    """Yield lists of items from *iterable*, where each list ends with an
-    item where callable *pred* returns ``True``:
-
-        >>> list(split_after('one1two2', lambda s: s.isdigit()))
-        [['o', 'n', 'e', '1'], ['t', 'w', 'o', '2']]
-
-        >>> list(split_after(range(10), lambda n: n % 3 == 0))
-        [[0], [1, 2, 3], [4, 5, 6], [7, 8, 9]]
-
-    At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
-    then there is no limit on the number of splits:
-
-        >>> list(split_after(range(10), lambda n: n % 3 == 0, maxsplit=2))
-        [[0], [1, 2, 3], [4, 5, 6, 7, 8, 9]]
-
-    """
-    if maxsplit == 0:
-        yield list(iterable)
-        return
-
-    buf = []
-    it = iter(iterable)
-    for item in it:
-        buf.append(item)
-        if pred(item) and buf:
-            yield buf
-            if maxsplit == 1:
-                yield list(it)
-                return
-            buf = []
-            maxsplit -= 1
-    if buf:
-        yield buf
-
-
-def split_when(iterable, pred, maxsplit=-1):
-    """Split *iterable* into pieces based on the output of *pred*.
-    *pred* should be a function that takes successive pairs of items and
-    returns ``True`` if the iterable should be split in between them.
-
-    For example, to find runs of increasing numbers, split the iterable when
-    element ``i`` is larger than element ``i + 1``:
-
-        >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2], lambda x, y: x > y))
-        [[1, 2, 3, 3], [2, 5], [2, 4], [2]]
-
-    At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
-    then there is no limit on the number of splits:
-
-        >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2],
-        ...                 lambda x, y: x > y, maxsplit=2))
-        [[1, 2, 3, 3], [2, 5], [2, 4, 2]]
-
-    """
-    if maxsplit == 0:
-        yield list(iterable)
-        return
-
-    it = iter(iterable)
-    try:
-        cur_item = next(it)
-    except StopIteration:
-        return
-
-    buf = [cur_item]
-    for next_item in it:
-        if pred(cur_item, next_item):
-            yield buf
-            if maxsplit == 1:
-                yield [next_item] + list(it)
-                return
-            buf = []
-            maxsplit -= 1
-
-        buf.append(next_item)
-        cur_item = next_item
-
-    yield buf
-
-
-def split_into(iterable, sizes):
-    """Yield a list of sequential items from *iterable* of length 'n' for each
-    integer 'n' in *sizes*.
-
-        >>> list(split_into([1,2,3,4,5,6], [1,2,3]))
-        [[1], [2, 3], [4, 5, 6]]
-
-    If the sum of *sizes* is smaller than the length of *iterable*, then the
-    remaining items of *iterable* will not be returned.
-
-        >>> list(split_into([1,2,3,4,5,6], [2,3]))
-        [[1, 2], [3, 4, 5]]
-
-    If the sum of *sizes* is larger than the length of *iterable*, fewer items
-    will be returned in the iteration that overruns *iterable* and further
-    lists will be empty:
-
-        >>> list(split_into([1,2,3,4], [1,2,3,4]))
-        [[1], [2, 3], [4], []]
-
-    When a ``None`` object is encountered in *sizes*, the returned list will
-    contain items up to the end of *iterable*, the same way that
-    :func:`itertools.islice` does:
-
-        >>> list(split_into([1,2,3,4,5,6,7,8,9,0], [2,3,None]))
-        [[1, 2], [3, 4, 5], [6, 7, 8, 9, 0]]
-
-    :func:`split_into` can be useful for grouping a series of items where the
-    sizes of the groups are not uniform. An example would be a table row in
-    which multiple columns represent elements of the same feature
-    (e.g. a point represented by x, y, z) but the format is not the same
-    for all columns.
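-
-    For instance (a hypothetical row mixing a 3-element point with two
-    scalar columns)::
-
-        >>> row = [1.0, 2.0, 3.0, 'red', 5]
-        >>> point, color, weight = split_into(row, [3, 1, 1])
-        >>> point
-        [1.0, 2.0, 3.0]
-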
-    """
-    # convert the iterable argument into an iterator so its contents can
-    # be consumed by islice in case it is a generator
-    it = iter(iterable)
-
-    for size in sizes:
-        if size is None:
-            yield list(it)
-            return
-        else:
-            yield list(islice(it, size))
-
-
-def padded(iterable, fillvalue=None, n=None, next_multiple=False):
-    """Yield the elements from *iterable*, followed by *fillvalue*, such that
-    at least *n* items are emitted.
-
-        >>> list(padded([1, 2, 3], '?', 5))
-        [1, 2, 3, '?', '?']
-
-    If *next_multiple* is ``True``, *fillvalue* will be emitted until the
-    number of items emitted is a multiple of *n*::
-
-        >>> list(padded([1, 2, 3, 4], n=3, next_multiple=True))
-        [1, 2, 3, 4, None, None]
-
-    If *n* is ``None``, *fillvalue* will be emitted indefinitely.
-
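-    For instance (truncating the infinite tail with :func:`itertools.islice`)::
-
-        >>> from itertools import islice
-        >>> list(islice(padded([1, 2, 3]), 5))
-        [1, 2, 3, None, None]
-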
-    """
-    it = iter(iterable)
-    if n is None:
-        yield from chain(it, repeat(fillvalue))
-    elif n < 1:
-        raise ValueError('n must be at least 1')
-    else:
-        item_count = 0
-        for item in it:
-            yield item
-            item_count += 1
-
-        remaining = (n - item_count) % n if next_multiple else n - item_count
-        for _ in range(remaining):
-            yield fillvalue
-
-
-def repeat_each(iterable, n=2):
-    """Repeat each element in *iterable* *n* times.
-
-    >>> list(repeat_each('ABC', 3))
-    ['A', 'A', 'A', 'B', 'B', 'B', 'C', 'C', 'C']
-    """
-    return chain.from_iterable(map(repeat, iterable, repeat(n)))
-
-
-def repeat_last(iterable, default=None):
-    """After the *iterable* is exhausted, keep yielding its last element.
-
-        >>> list(islice(repeat_last(range(3)), 5))
-        [0, 1, 2, 2, 2]
-
-    If the iterable is empty, yield *default* forever::
-
-        >>> list(islice(repeat_last(range(0), 42), 5))
-        [42, 42, 42, 42, 42]
-
-    """
-    item = _marker
-    for item in iterable:
-        yield item
-    final = default if item is _marker else item
-    yield from repeat(final)
-
-
-def distribute(n, iterable):
-    """Distribute the items from *iterable* among *n* smaller iterables.
-
-        >>> group_1, group_2 = distribute(2, [1, 2, 3, 4, 5, 6])
-        >>> list(group_1)
-        [1, 3, 5]
-        >>> list(group_2)
-        [2, 4, 6]
-
-    If the length of *iterable* is not evenly divisible by *n*, then the
-    length of the returned iterables will not be identical:
-
-        >>> children = distribute(3, [1, 2, 3, 4, 5, 6, 7])
-        >>> [list(c) for c in children]
-        [[1, 4, 7], [2, 5], [3, 6]]
-
-    If the length of *iterable* is smaller than *n*, then the last returned
-    iterables will be empty:
-
-        >>> children = distribute(5, [1, 2, 3])
-        >>> [list(c) for c in children]
-        [[1], [2], [3], [], []]
-
-    This function uses :func:`itertools.tee` and may require significant
-    storage. If you need the order of items in the smaller iterables to
-    match the original iterable, see :func:`divide`.
-
-    """
-    if n < 1:
-        raise ValueError('n must be at least 1')
-
-    children = tee(iterable, n)
-    return [islice(it, index, None, n) for index, it in enumerate(children)]
-
-
-def stagger(iterable, offsets=(-1, 0, 1), longest=False, fillvalue=None):
-    """Yield tuples whose elements are offset from *iterable*.
-    The amount by which the `i`-th item in each tuple is offset is given by
-    the `i`-th item in *offsets*.
-
-        >>> list(stagger([0, 1, 2, 3]))
-        [(None, 0, 1), (0, 1, 2), (1, 2, 3)]
-        >>> list(stagger(range(8), offsets=(0, 2, 4)))
-        [(0, 2, 4), (1, 3, 5), (2, 4, 6), (3, 5, 7)]
-
-    By default, the sequence will end when the final element of a tuple is the
-    last item in the iterable. To continue until the first element of a tuple
-    is the last item in the iterable, set *longest* to ``True``::
-
-        >>> list(stagger([0, 1, 2, 3], longest=True))
-        [(None, 0, 1), (0, 1, 2), (1, 2, 3), (2, 3, None), (3, None, None)]
-
-    By default, ``None`` will be used to replace offsets beyond the end of the
-    sequence. Specify *fillvalue* to use some other value.
-
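-    For example (an illustration of *fillvalue*)::
-
-        >>> list(stagger([0, 1, 2], offsets=(0, 1), longest=True, fillvalue='?'))
-        [(0, 1), (1, 2), (2, '?')]
-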
-    """
-    children = tee(iterable, len(offsets))
-
-    return zip_offset(
-        *children, offsets=offsets, longest=longest, fillvalue=fillvalue
-    )
-
-
-class UnequalIterablesError(ValueError):
-    def __init__(self, details=None):
-        msg = 'Iterables have different lengths'
-        if details is not None:
-            msg += (': index 0 has length {}; index {} has length {}').format(
-                *details
-            )
-
-        super().__init__(msg)
-
-
-def _zip_equal_generator(iterables):
-    for combo in zip_longest(*iterables, fillvalue=_marker):
-        for val in combo:
-            if val is _marker:
-                raise UnequalIterablesError()
-        yield combo
-
-
-def _zip_equal(*iterables):
-    # Check whether the iterables are all the same size.
-    try:
-        first_size = len(iterables[0])
-        for i, it in enumerate(iterables[1:], 1):
-            size = len(it)
-            if size != first_size:
-                break
-        else:
-            # If we didn't break out, we can use the built-in zip.
-            return zip(*iterables)
-
-        # If we did break out, there was a mismatch.
-        raise UnequalIterablesError(details=(first_size, i, size))
-    # If any one of the iterables didn't have a length, start reading
-    # them until one runs out.
-    except TypeError:
-        return _zip_equal_generator(iterables)
-
-
-def zip_equal(*iterables):
-    """``zip`` the input *iterables* together, but raise
-    ``UnequalIterablesError`` if they aren't all the same length.
-
-        >>> it_1 = range(3)
-        >>> it_2 = iter('abc')
-        >>> list(zip_equal(it_1, it_2))
-        [(0, 'a'), (1, 'b'), (2, 'c')]
-
-        >>> it_1 = range(3)
-        >>> it_2 = iter('abcd')
-        >>> list(zip_equal(it_1, it_2)) # doctest: +IGNORE_EXCEPTION_DETAIL
-        Traceback (most recent call last):
-        ...
-        more_itertools.more.UnequalIterablesError: Iterables have different
-        lengths
-
-    """
-    if hexversion >= 0x30A00A6:
-        warnings.warn(
-            (
-                'zip_equal will be removed in a future version of '
-                'more-itertools. Use the builtin zip function with '
-                'strict=True instead.'
-            ),
-            DeprecationWarning,
-        )
-
-    return _zip_equal(*iterables)
-
-
-def zip_offset(*iterables, offsets, longest=False, fillvalue=None):
-    """``zip`` the input *iterables* together, but offset the `i`-th iterable
-    by the `i`-th item in *offsets*.
-
-        >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1)))
-        [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e')]
-
-    This can be used as a lightweight alternative to SciPy or pandas to analyze
-    data sets in which some series have a lead or lag relationship.
-
-    By default, the sequence will end when the shortest iterable is exhausted.
-    To continue until the longest iterable is exhausted, set *longest* to
-    ``True``.
-
-        >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1), longest=True))
-        [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e'), (None, 'f')]
-
-    By default, ``None`` will be used to replace offsets beyond the end of the
-    sequence. Specify *fillvalue* to use some other value.
-
-    """
-    if len(iterables) != len(offsets):
-        raise ValueError("Number of iterables and offsets didn't match")
-
-    staggered = []
-    for it, n in zip(iterables, offsets):
-        if n < 0:
-            staggered.append(chain(repeat(fillvalue, -n), it))
-        elif n > 0:
-            staggered.append(islice(it, n, None))
-        else:
-            staggered.append(it)
-
-    if longest:
-        return zip_longest(*staggered, fillvalue=fillvalue)
-
-    return zip(*staggered)
-
-
-def sort_together(iterables, key_list=(0,), key=None, reverse=False):
-    """Return the input iterables sorted together, with *key_list* as the
-    priority for sorting. All iterables are trimmed to the length of the
-    shortest one.
-
-    This can be used like the sorting function in a spreadsheet. If each
-    iterable represents a column of data, the key list determines which
-    columns are used for sorting.
-
-    By default, all iterables are sorted using the ``0``-th iterable::
-
-        >>> iterables = [(4, 3, 2, 1), ('a', 'b', 'c', 'd')]
-        >>> sort_together(iterables)
-        [(1, 2, 3, 4), ('d', 'c', 'b', 'a')]
-
-    Set a different key list to sort according to another iterable.
-    Specifying multiple keys dictates how ties are broken::
-
-        >>> iterables = [(3, 1, 2), (0, 1, 0), ('c', 'b', 'a')]
-        >>> sort_together(iterables, key_list=(1, 2))
-        [(2, 3, 1), (0, 0, 1), ('a', 'c', 'b')]
-
-    To sort by a function of the elements of the iterable, pass a *key*
-    function. Its arguments are the elements of the iterables corresponding to
-    the key list::
-
-        >>> names = ('a', 'b', 'c')
-        >>> lengths = (1, 2, 3)
-        >>> widths = (5, 2, 1)
-        >>> def area(length, width):
-        ...     return length * width
-        >>> sort_together([names, lengths, widths], key_list=(1, 2), key=area)
-        [('c', 'b', 'a'), (3, 2, 1), (1, 2, 5)]
-
-    Set *reverse* to ``True`` to sort in descending order.
-
-        >>> sort_together([(1, 2, 3), ('c', 'b', 'a')], reverse=True)
-        [(3, 2, 1), ('a', 'b', 'c')]
-
-    """
-    if key is None:
-        # if there is no key function, the key argument to sorted is an
-        # itemgetter
-        key_argument = itemgetter(*key_list)
-    else:
-        # if there is a key function, call it with the items at the offsets
-        # specified by the key list as arguments
-        key_list = list(key_list)
-        if len(key_list) == 1:
-            # if key_list contains a single item, pass the item at that offset
-            # as the only argument to the key function
-            key_offset = key_list[0]
-            key_argument = lambda zipped_items: key(zipped_items[key_offset])
-        else:
-            # if key_list contains multiple items, use itemgetter to return a
-            # tuple of items, which we pass as *args to the key function
-            get_key_items = itemgetter(*key_list)
-            key_argument = lambda zipped_items: key(
-                *get_key_items(zipped_items)
-            )
-
-    return list(
-        zip(*sorted(zip(*iterables), key=key_argument, reverse=reverse))
-    )
-
-
-def unzip(iterable):
-    """The inverse of :func:`zip`, this function disaggregates the elements
-    of the zipped *iterable*.
-
-    The ``i``-th iterable contains the ``i``-th element from each element
-    of the zipped iterable. The length of the first element determines how
-    many iterables are returned.
-
-        >>> iterable = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
-        >>> letters, numbers = unzip(iterable)
-        >>> list(letters)
-        ['a', 'b', 'c', 'd']
-        >>> list(numbers)
-        [1, 2, 3, 4]
-
-    This is similar to using ``zip(*iterable)``, but it avoids reading
-    *iterable* into memory. Note, however, that this function uses
-    :func:`itertools.tee` and thus may require significant storage.
-
-    """
-    head, iterable = spy(iter(iterable))
-    if not head:
-        # empty iterable, e.g. zip([], [], [])
-        return ()
-    # spy returns a one-length iterable as head
-    head = head[0]
-    iterables = tee(iterable, len(head))
-
-    def itemgetter(i):
-        def getter(obj):
-            try:
-                return obj[i]
-            except IndexError:
-                # basically if we have an iterable like
-                # iter([(1, 2, 3), (4, 5), (6,)])
-                # the second unzipped iterable would fail at the third tuple
-                # since it would try to access tup[1]
-                # same with the third unzipped iterable and the second tuple
-                # to support these "improperly zipped" iterables,
-                # we create a custom itemgetter
-                # which just stops the unzipped iterables
-                # at first length mismatch
-                raise StopIteration
-
-        return getter
-
-    return tuple(map(itemgetter(i), it) for i, it in enumerate(iterables))
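-
-# A sketch of the "improperly zipped" case handled by the custom itemgetter
-# above: each unzipped iterable simply stops at the first length mismatch.
-#
-#     >>> first, second, third = unzip(iter([(1, 2, 3), (4, 5), (6,)]))
-#     >>> list(first)
-#     [1, 4, 6]
-#     >>> list(second)
-#     [2, 5]
-#     >>> list(third)
-#     [3]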
-
-
-def divide(n, iterable):
-    """Divide the elements from *iterable* into *n* parts, maintaining
-    order.
-
-        >>> group_1, group_2 = divide(2, [1, 2, 3, 4, 5, 6])
-        >>> list(group_1)
-        [1, 2, 3]
-        >>> list(group_2)
-        [4, 5, 6]
-
-    If the length of *iterable* is not evenly divisible by *n*, then the
-    lengths of the returned iterables will not be identical:
-
-        >>> children = divide(3, [1, 2, 3, 4, 5, 6, 7])
-        >>> [list(c) for c in children]
-        [[1, 2, 3], [4, 5], [6, 7]]
-
-    If the length of the iterable is smaller than *n*, then the last returned
-    iterables will be empty:
-
-        >>> children = divide(5, [1, 2, 3])
-        >>> [list(c) for c in children]
-        [[1], [2], [3], [], []]
-
-    This function will exhaust the iterable before returning and may require
-    significant storage. If order is not important, see :func:`distribute`,
-    which does not first pull the iterable into memory.
-
-    """
-    if n < 1:
-        raise ValueError('n must be at least 1')
-
-    try:
-        iterable[:0]
-    except TypeError:
-        seq = tuple(iterable)
-    else:
-        seq = iterable
-
-    q, r = divmod(len(seq), n)
-
-    ret = []
-    stop = 0
-    for i in range(1, n + 1):
-        start = stop
-        stop += q + 1 if i <= r else q
-        ret.append(iter(seq[start:stop]))
-
-    return ret
-
-
-def always_iterable(obj, base_type=(str, bytes)):
-    """If *obj* is iterable, return an iterator over its items::
-
-        >>> obj = (1, 2, 3)
-        >>> list(always_iterable(obj))
-        [1, 2, 3]
-
-    If *obj* is not iterable, return a one-item iterable containing *obj*::
-
-        >>> obj = 1
-        >>> list(always_iterable(obj))
-        [1]
-
-    If *obj* is ``None``, return an empty iterable:
-
-        >>> obj = None
-        >>> list(always_iterable(None))
-        []
-
-    By default, binary and text strings are not considered iterable::
-
-        >>> obj = 'foo'
-        >>> list(always_iterable(obj))
-        ['foo']
-
-    If *base_type* is set, objects for which ``isinstance(obj, base_type)``
-    returns ``True`` won't be considered iterable.
-
-        >>> obj = {'a': 1}
-        >>> list(always_iterable(obj))  # Iterate over the dict's keys
-        ['a']
-        >>> list(always_iterable(obj, base_type=dict))  # Treat dicts as a unit
-        [{'a': 1}]
-
-    Set *base_type* to ``None`` to avoid any special handling and treat objects
-    Python considers iterable as iterable:
-
-        >>> obj = 'foo'
-        >>> list(always_iterable(obj, base_type=None))
-        ['f', 'o', 'o']
-    """
-    if obj is None:
-        return iter(())
-
-    if (base_type is not None) and isinstance(obj, base_type):
-        return iter((obj,))
-
-    try:
-        return iter(obj)
-    except TypeError:
-        return iter((obj,))
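-
-# A sketch of the usual argument-normalization pattern this enables (the
-# function name is illustrative, not part of this module):
-#
-#     >>> def report(events):
-#     ...     return [str(e) for e in always_iterable(events)]
-#     >>> report('overload')  # a lone string is treated as a single item
-#     ['overload']
-#     >>> report(['overload', 'reboot'])
-#     ['overload', 'reboot']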
-
-
-def adjacent(predicate, iterable, distance=1):
-    """Return an iterable over `(bool, item)` tuples where the `item` is
-    drawn from *iterable* and the `bool` indicates whether
-    that item satisfies the *predicate* or is adjacent to an item that does.
-
-    For example, to find whether items are adjacent to a ``3``::
-
-        >>> list(adjacent(lambda x: x == 3, range(6)))
-        [(False, 0), (False, 1), (True, 2), (True, 3), (True, 4), (False, 5)]
-
-    Set *distance* to change what counts as adjacent. For example, to find
-    whether items are two places away from a ``3``:
-
-        >>> list(adjacent(lambda x: x == 3, range(6), distance=2))
-        [(False, 0), (True, 1), (True, 2), (True, 3), (True, 4), (True, 5)]
-
-    This is useful for contextualizing the results of a search function.
-    For example, a code comparison tool might want to identify lines that
-    have changed, but also the surrounding lines, to give the viewer context
-    for the diff.
-
-    The predicate function will only be called once for each item in the
-    iterable.
-
-    See also :func:`groupby_transform`, which can be used with this function
-    to group ranges of items with the same `bool` value.
-
-    """
-    # Allow distance=0 mainly for testing that it reproduces results with map()
-    if distance < 0:
-        raise ValueError('distance must be at least 0')
-
-    i1, i2 = tee(iterable)
-    padding = [False] * distance
-    selected = chain(padding, map(predicate, i1), padding)
-    adjacent_to_selected = map(any, windowed(selected, 2 * distance + 1))
-    return zip(adjacent_to_selected, i2)
-
-
-def groupby_transform(iterable, keyfunc=None, valuefunc=None, reducefunc=None):
-    """An extension of :func:`itertools.groupby` that can apply transformations
-    to the grouped data.
-
-    * *keyfunc* is a function computing a key value for each item in *iterable*
-    * *valuefunc* is a function that transforms the individual items from
-      *iterable* after grouping
-    * *reducefunc* is a function that transforms each group of items
-
-    >>> iterable = 'aAAbBBcCC'
-    >>> keyfunc = lambda k: k.upper()
-    >>> valuefunc = lambda v: v.lower()
-    >>> reducefunc = lambda g: ''.join(g)
-    >>> list(groupby_transform(iterable, keyfunc, valuefunc, reducefunc))
-    [('A', 'aaa'), ('B', 'bbb'), ('C', 'ccc')]
-
-    Each optional argument defaults to an identity function if not specified.
-
-    :func:`groupby_transform` is useful when grouping elements of an iterable
-    using a separate iterable as the key. To do this, :func:`zip` the iterables
-    and pass a *keyfunc* that extracts the first element and a *valuefunc*
-    that extracts the second element::
-
-        >>> from operator import itemgetter
-        >>> keys = [0, 0, 1, 1, 1, 2, 2, 2, 3]
-        >>> values = 'abcdefghi'
-        >>> iterable = zip(keys, values)
-        >>> grouper = groupby_transform(iterable, itemgetter(0), itemgetter(1))
-        >>> [(k, ''.join(g)) for k, g in grouper]
-        [(0, 'ab'), (1, 'cde'), (2, 'fgh'), (3, 'i')]
-
-    Note that the order of items in the iterable is significant.
-    Only adjacent items are grouped together, so if you don't want any
-    duplicate groups, you should sort the iterable by the key function.
-
-    """
-    ret = groupby(iterable, keyfunc)
-    if valuefunc:
-        ret = ((k, map(valuefunc, g)) for k, g in ret)
-    if reducefunc:
-        ret = ((k, reducefunc(g)) for k, g in ret)
-
-    return ret
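-
-# A sketch of the pairing suggested in the :func:`adjacent` docstring:
-# collapse its (bool, item) pairs into contiguous ranges.
-#
-#     >>> from operator import itemgetter
-#     >>> hits = adjacent(lambda x: x == 3, range(6))
-#     >>> grouped = groupby_transform(hits, itemgetter(0), itemgetter(1))
-#     >>> [(k, list(g)) for k, g in grouped]
-#     [(False, [0, 1]), (True, [2, 3, 4]), (False, [5])]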
-
-
-class numeric_range(abc.Sequence, abc.Hashable):
-    """An extension of the built-in ``range()`` function whose arguments can
-    be any orderable numeric type.
-
-    With only *stop* specified, *start* defaults to ``0`` and *step*
-    defaults to ``1``. The output items will match the type of *stop*:
-
-        >>> list(numeric_range(3.5))
-        [0.0, 1.0, 2.0, 3.0]
-
-    With only *start* and *stop* specified, *step* defaults to ``1``. The
-    output items will match the type of *start*:
-
-        >>> from decimal import Decimal
-        >>> start = Decimal('2.1')
-        >>> stop = Decimal('5.1')
-        >>> list(numeric_range(start, stop))
-        [Decimal('2.1'), Decimal('3.1'), Decimal('4.1')]
-
-    With *start*, *stop*, and *step* specified, the output items will match
-    the type of ``start + step``:
-
-        >>> from fractions import Fraction
-        >>> start = Fraction(1, 2)  # Start at 1/2
-        >>> stop = Fraction(5, 2)  # End at 5/2
-        >>> step = Fraction(1, 2)  # Count by 1/2
-        >>> list(numeric_range(start, stop, step))
-        [Fraction(1, 2), Fraction(1, 1), Fraction(3, 2), Fraction(2, 1)]
-
-    If *step* is zero, ``ValueError`` is raised. Negative steps are supported:
-
-        >>> list(numeric_range(3, -1, -1.0))
-        [3.0, 2.0, 1.0, 0.0]
-
-    Be aware of the limitations of floating point numbers; the representation
-    of the yielded numbers may be surprising.
-
-    ``datetime.datetime`` objects can be used for *start* and *stop*, if *step*
-    is a ``datetime.timedelta`` object:
-
-        >>> import datetime
-        >>> start = datetime.datetime(2019, 1, 1)
-        >>> stop = datetime.datetime(2019, 1, 3)
-        >>> step = datetime.timedelta(days=1)
-        >>> items = iter(numeric_range(start, stop, step))
-        >>> next(items)
-        datetime.datetime(2019, 1, 1, 0, 0)
-        >>> next(items)
-        datetime.datetime(2019, 1, 2, 0, 0)
-
-    """
-
-    _EMPTY_HASH = hash(range(0, 0))
-
-    def __init__(self, *args):
-        argc = len(args)
-        if argc == 1:
-            (self._stop,) = args
-            self._start = type(self._stop)(0)
-            self._step = type(self._stop - self._start)(1)
-        elif argc == 2:
-            self._start, self._stop = args
-            self._step = type(self._stop - self._start)(1)
-        elif argc == 3:
-            self._start, self._stop, self._step = args
-        elif argc == 0:
-            raise TypeError(
-                'numeric_range expected at least '
-                '1 argument, got {}'.format(argc)
-            )
-        else:
-            raise TypeError(
-                'numeric_range expected at most '
-                '3 arguments, got {}'.format(argc)
-            )
-
-        self._zero = type(self._step)(0)
-        if self._step == self._zero:
-            raise ValueError('numeric_range() arg 3 must not be zero')
-        self._growing = self._step > self._zero
-        self._init_len()
-
-    def __bool__(self):
-        if self._growing:
-            return self._start < self._stop
-        else:
-            return self._start > self._stop
-
-    def __contains__(self, elem):
-        if self._growing:
-            if self._start <= elem < self._stop:
-                return (elem - self._start) % self._step == self._zero
-        else:
-            if self._start >= elem > self._stop:
-                return (self._start - elem) % (-self._step) == self._zero
-
-        return False
-
-    def __eq__(self, other):
-        if isinstance(other, numeric_range):
-            empty_self = not bool(self)
-            empty_other = not bool(other)
-            if empty_self or empty_other:
-                return empty_self and empty_other  # True if both empty
-            else:
-                return (
-                    self._start == other._start
-                    and self._step == other._step
-                    and self._get_by_index(-1) == other._get_by_index(-1)
-                )
-        else:
-            return False
-
-    def __getitem__(self, key):
-        if isinstance(key, int):
-            return self._get_by_index(key)
-        elif isinstance(key, slice):
-            step = self._step if key.step is None else key.step * self._step
-
-            if key.start is None or key.start <= -self._len:
-                start = self._start
-            elif key.start >= self._len:
-                start = self._stop
-            else:  # -self._len < key.start < self._len
-                start = self._get_by_index(key.start)
-
-            if key.stop is None or key.stop >= self._len:
-                stop = self._stop
-            elif key.stop <= -self._len:
-                stop = self._start
-            else:  # -self._len < key.stop < self._len
-                stop = self._get_by_index(key.stop)
-
-            return numeric_range(start, stop, step)
-        else:
-            raise TypeError(
-                'numeric range indices must be '
-                'integers or slices, not {}'.format(type(key).__name__)
-            )
-
-    def __hash__(self):
-        if self:
-            return hash((self._start, self._get_by_index(-1), self._step))
-        else:
-            return self._EMPTY_HASH
-
-    def __iter__(self):
-        values = (self._start + (n * self._step) for n in count())
-        if self._growing:
-            return takewhile(partial(gt, self._stop), values)
-        else:
-            return takewhile(partial(lt, self._stop), values)
-
-    def __len__(self):
-        return self._len
-
-    def _init_len(self):
-        if self._growing:
-            start = self._start
-            stop = self._stop
-            step = self._step
-        else:
-            start = self._stop
-            stop = self._start
-            step = -self._step
-        distance = stop - start
-        if distance <= self._zero:
-            self._len = 0
-        else:  # distance > 0 and step > 0: regular euclidean division
-            q, r = divmod(distance, step)
-            self._len = int(q) + int(r != self._zero)
-
-    def __reduce__(self):
-        return numeric_range, (self._start, self._stop, self._step)
-
-    def __repr__(self):
-        if self._step == 1:
-            return "numeric_range({}, {})".format(
-                repr(self._start), repr(self._stop)
-            )
-        else:
-            return "numeric_range({}, {}, {})".format(
-                repr(self._start), repr(self._stop), repr(self._step)
-            )
-
-    def __reversed__(self):
-        return iter(
-            numeric_range(
-                self._get_by_index(-1), self._start - self._step, -self._step
-            )
-        )
-
-    def count(self, value):
-        return int(value in self)
-
-    def index(self, value):
-        if self._growing:
-            if self._start <= value < self._stop:
-                q, r = divmod(value - self._start, self._step)
-                if r == self._zero:
-                    return int(q)
-        else:
-            if self._start >= value > self._stop:
-                q, r = divmod(self._start - value, -self._step)
-                if r == self._zero:
-                    return int(q)
-
-        raise ValueError("{} is not in numeric range".format(value))
-
-    def _get_by_index(self, i):
-        if i < 0:
-            i += self._len
-        if i < 0 or i >= self._len:
-            raise IndexError("numeric range object index out of range")
-        return self._start + i * self._step
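-
-# A short illustration of the floating point caveat in the docstring above:
-# repeated addition of a binary-inexact step accumulates representation error.
-#
-#     >>> list(numeric_range(0, 0.4, 0.1))
-#     [0.0, 0.1, 0.2, 0.30000000000000004]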
-
-
-def count_cycle(iterable, n=None):
-    """Cycle through the items from *iterable* up to *n* times, yielding
-    the number of completed cycles along with each item. If *n* is omitted the
-    process repeats indefinitely.
-
-    >>> list(count_cycle('AB', 3))
-    [(0, 'A'), (0, 'B'), (1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')]
-
-    """
-    iterable = tuple(iterable)
-    if not iterable:
-        return iter(())
-    counter = count() if n is None else range(n)
-    return ((i, item) for i in counter for item in iterable)
-
-
-def mark_ends(iterable):
-    """Yield 3-tuples of the form ``(is_first, is_last, item)``.
-
-    >>> list(mark_ends('ABC'))
-    [(True, False, 'A'), (False, False, 'B'), (False, True, 'C')]
-
-    Use this when looping over an iterable to take special action on its first
-    and/or last items:
-
-    >>> iterable = ['Header', 100, 200, 'Footer']
-    >>> total = 0
-    >>> for is_first, is_last, item in mark_ends(iterable):
-    ...     if is_first:
-    ...         continue  # Skip the header
-    ...     if is_last:
-    ...         continue  # Skip the footer
-    ...     total += item
-    >>> print(total)
-    300
-    """
-    it = iter(iterable)
-
-    try:
-        b = next(it)
-    except StopIteration:
-        return
-
-    try:
-        for i in count():
-            a = b
-            b = next(it)
-            yield i == 0, False, a
-
-    except StopIteration:
-        yield i == 0, True, a
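-
-# Another sketch of the same idea: vary the separator for the final item.
-#
-#     >>> parts = []
-#     >>> for is_first, is_last, item in mark_ends(['a', 'b', 'c']):
-#     ...     parts.append(item if is_last else item + ', ')
-#     >>> ''.join(parts)
-#     'a, b, c'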
-
-
-def locate(iterable, pred=bool, window_size=None):
-    """Yield the index of each item in *iterable* for which *pred* returns
-    ``True``.
-
-    *pred* defaults to :func:`bool`, which will select truthy items:
-
-        >>> list(locate([0, 1, 1, 0, 1, 0, 0]))
-        [1, 2, 4]
-
-    Set *pred* to a custom function to, e.g., find the indexes for a particular
-    item.
-
-        >>> list(locate(['a', 'b', 'c', 'b'], lambda x: x == 'b'))
-        [1, 3]
-
-    If *window_size* is given, then the *pred* function will be called with
-    that many items. This enables searching for sub-sequences:
-
-        >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
-        >>> pred = lambda *args: args == (1, 2, 3)
-        >>> list(locate(iterable, pred=pred, window_size=3))
-        [1, 5, 9]
-
-    Use with :func:`seekable` to find indexes and then retrieve the associated
-    items:
-
-        >>> from itertools import count
-        >>> from more_itertools import seekable
-        >>> source = (3 * n + 1 if (n % 2) else n // 2 for n in count())
-        >>> it = seekable(source)
-        >>> pred = lambda x: x > 100
-        >>> indexes = locate(it, pred=pred)
-        >>> i = next(indexes)
-        >>> it.seek(i)
-        >>> next(it)
-        106
-
-    """
-    if window_size is None:
-        return compress(count(), map(pred, iterable))
-
-    if window_size < 1:
-        raise ValueError('window size must be at least 1')
-
-    it = windowed(iterable, window_size, fillvalue=_marker)
-    return compress(count(), starmap(pred, it))
-
-
-def lstrip(iterable, pred):
-    """Yield the items from *iterable*, but strip any from the beginning
-    for which *pred* returns ``True``.
-
-    For example, to remove a set of items from the start of an iterable:
-
-        >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
-        >>> pred = lambda x: x in {None, False, ''}
-        >>> list(lstrip(iterable, pred))
-        [1, 2, None, 3, False, None]
-
-    This function is analogous to :func:`str.lstrip`, and is essentially
-    a wrapper for :func:`itertools.dropwhile`.
-
-    """
-    return dropwhile(pred, iterable)
-
-
-def rstrip(iterable, pred):
-    """Yield the items from *iterable*, but strip any from the end
-    for which *pred* returns ``True``.
-
-    For example, to remove a set of items from the end of an iterable:
-
-        >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
-        >>> pred = lambda x: x in {None, False, ''}
-        >>> list(rstrip(iterable, pred))
-        [None, False, None, 1, 2, None, 3]
-
-    This function is analogous to :func:`str.rstrip`.
-
-    """
-    cache = []
-    cache_append = cache.append
-    cache_clear = cache.clear
-    for x in iterable:
-        if pred(x):
-            cache_append(x)
-        else:
-            yield from cache
-            cache_clear()
-            yield x
-
-
-def strip(iterable, pred):
-    """Yield the items from *iterable*, but strip any from the
-    beginning and end for which *pred* returns ``True``.
-
-    For example, to remove a set of items from both ends of an iterable:
-
-        >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
-        >>> pred = lambda x: x in {None, False, ''}
-        >>> list(strip(iterable, pred))
-        [1, 2, None, 3]
-
-    This function is analogous to :func:`str.strip`.
-
-    """
-    return rstrip(lstrip(iterable, pred), pred)
-
-
-class islice_extended:
-    """An extension of :func:`itertools.islice` that supports negative values
-    for *stop*, *start*, and *step*.
-
-        >>> iterable = iter('abcdefgh')
-        >>> list(islice_extended(iterable, -4, -1))
-        ['e', 'f', 'g']
-
-    Slices with negative values require some caching of *iterable*, but this
-    function takes care to minimize the amount of memory required.
-
-    For example, you can use a negative step with an infinite iterator:
-
-        >>> from itertools import count
-        >>> list(islice_extended(count(), 110, 99, -2))
-        [110, 108, 106, 104, 102, 100]
-
-    You can also use slice notation directly:
-
-        >>> iterable = map(str, count())
-        >>> it = islice_extended(iterable)[10:20:2]
-        >>> list(it)
-        ['10', '12', '14', '16', '18']
-
-    """
-
-    def __init__(self, iterable, *args):
-        it = iter(iterable)
-        if args:
-            self._iterable = _islice_helper(it, slice(*args))
-        else:
-            self._iterable = it
-
-    def __iter__(self):
-        return self
-
-    def __next__(self):
-        return next(self._iterable)
-
-    def __getitem__(self, key):
-        if isinstance(key, slice):
-            return islice_extended(_islice_helper(self._iterable, key))
-
-        raise TypeError('islice_extended.__getitem__ argument must be a slice')
-
-
-def _islice_helper(it, s):
-    start = s.start
-    stop = s.stop
-    if s.step == 0:
-        raise ValueError('step argument must be a non-zero integer or None.')
-    step = s.step or 1
-
-    if step > 0:
-        start = 0 if (start is None) else start
-
-        if start < 0:
-            # Consume all but the last -start items
-            cache = deque(enumerate(it, 1), maxlen=-start)
-            len_iter = cache[-1][0] if cache else 0
-
-            # Adjust start to be positive
-            i = max(len_iter + start, 0)
-
-            # Adjust stop to be positive
-            if stop is None:
-                j = len_iter
-            elif stop >= 0:
-                j = min(stop, len_iter)
-            else:
-                j = max(len_iter + stop, 0)
-
-            # Slice the cache
-            n = j - i
-            if n <= 0:
-                return
-
-            for index, item in islice(cache, 0, n, step):
-                yield item
-        elif (stop is not None) and (stop < 0):
-            # Advance to the start position
-            next(islice(it, start, start), None)
-
-            # When stop is negative, we have to carry -stop items while
-            # iterating
-            cache = deque(islice(it, -stop), maxlen=-stop)
-
-            for index, item in enumerate(it):
-                cached_item = cache.popleft()
-                if index % step == 0:
-                    yield cached_item
-                cache.append(item)
-        else:
-            # When both start and stop are positive we have the normal case
-            yield from islice(it, start, stop, step)
-    else:
-        start = -1 if (start is None) else start
-
-        if (stop is not None) and (stop < 0):
-            # Consume all but the last n items
-            n = -stop - 1
-            cache = deque(enumerate(it, 1), maxlen=n)
-            len_iter = cache[-1][0] if cache else 0
-
-            # If start and stop are both negative they are comparable and
-            # we can just slice. Otherwise we can adjust start to be negative
-            # and then slice.
-            if start < 0:
-                i, j = start, stop
-            else:
-                i, j = min(start - len_iter, -1), None
-
-            for index, item in list(cache)[i:j:step]:
-                yield item
-        else:
-            # Advance to the stop position
-            if stop is not None:
-                m = stop + 1
-                next(islice(it, m, m), None)
-
-            # stop is positive, so if start is negative they are not comparable
-            # and we need the rest of the items.
-            if start < 0:
-                i = start
-                n = None
-            # stop is None and start is positive, so we just need items up to
-            # the start index.
-            elif stop is None:
-                i = None
-                n = start + 1
-            # Both stop and start are positive, so they are comparable.
-            else:
-                i = None
-                n = start - stop
-                if n <= 0:
-                    return
-
-            cache = list(islice(it, n))
-
-            yield from cache[i::step]
-
-
-def always_reversible(iterable):
-    """An extension of :func:`reversed` that supports all iterables, not
-    just those which implement the ``Reversible`` or ``Sequence`` protocols.
-
-        >>> print(*always_reversible(x for x in range(3)))
-        2 1 0
-
-    If the iterable is already reversible, this function returns the
-    result of :func:`reversed()`. If the iterable is not reversible,
-    this function will cache the remaining items in the iterable and
-    yield them in reverse order, which may require significant storage.
-    """
-    try:
-        return reversed(iterable)
-    except TypeError:
-        return reversed(list(iterable))
-
-
-def consecutive_groups(iterable, ordering=lambda x: x):
-    """Yield groups of consecutive items using :func:`itertools.groupby`.
-    The *ordering* function determines whether two items are adjacent by
-    returning their position in the overall ordering.
-
-    By default, the ordering function is the identity function. This is
-    suitable for finding runs of numbers:
-
-        >>> iterable = [1, 10, 11, 12, 20, 30, 31, 32, 33, 40]
-        >>> for group in consecutive_groups(iterable):
-        ...     print(list(group))
-        [1]
-        [10, 11, 12]
-        [20]
-        [30, 31, 32, 33]
-        [40]
-
-    For finding runs of adjacent letters, try using the :meth:`index` method
-    of a string of letters:
-
-        >>> from string import ascii_lowercase
-        >>> iterable = 'abcdfgilmnop'
-        >>> ordering = ascii_lowercase.index
-        >>> for group in consecutive_groups(iterable, ordering):
-        ...     print(list(group))
-        ['a', 'b', 'c', 'd']
-        ['f', 'g']
-        ['i']
-        ['l', 'm', 'n', 'o', 'p']
-
-    Each group of consecutive items is an iterator that shares its source with
-    *iterable*. When an output group is advanced, the previous group is
-    no longer available unless its elements are copied (e.g., into a ``list``).
-
-        >>> iterable = [1, 2, 11, 12, 21, 22]
-        >>> saved_groups = []
-        >>> for group in consecutive_groups(iterable):
-        ...     saved_groups.append(list(group))  # Copy group elements
-        >>> saved_groups
-        [[1, 2], [11, 12], [21, 22]]
-
-    """
-    for k, g in groupby(
-        enumerate(iterable), key=lambda x: x[0] - ordering(x[1])
-    ):
-        yield map(itemgetter(1), g)
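-
-# The key above relies on a classic trick: within a run of consecutive items,
-# the enumeration index and the ordering position rise in lockstep, so their
-# difference is constant. A quick sketch:
-#
-#     >>> [i - x for i, x in enumerate([1, 10, 11, 12, 20])]
-#     [-1, -9, -9, -9, -16]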
-
-
-def difference(iterable, func=sub, *, initial=None):
-    """This function is the inverse of :func:`itertools.accumulate`. By default
-    it will compute the first difference of *iterable* using
-    :func:`operator.sub`:
-
-        >>> from itertools import accumulate
-        >>> iterable = accumulate([0, 1, 2, 3, 4])  # produces 0, 1, 3, 6, 10
-        >>> list(difference(iterable))
-        [0, 1, 2, 3, 4]
-
-    *func* defaults to :func:`operator.sub`, but other functions can be
-    specified. They will be applied as follows::
-
-        A, B, C, D, ... --> A, func(B, A), func(C, B), func(D, C), ...
-
-    For example, to do progressive division:
-
-        >>> iterable = [1, 2, 6, 24, 120]
-        >>> func = lambda x, y: x // y
-        >>> list(difference(iterable, func))
-        [1, 2, 3, 4, 5]
-
-    If the *initial* keyword is set, the first element will be skipped when
-    computing successive differences.
-
-        >>> it = [10, 11, 13, 16]  # from accumulate([1, 2, 3], initial=10)
-        >>> list(difference(it, initial=10))
-        [1, 2, 3]
-
-    """
-    a, b = tee(iterable)
-    try:
-        first = [next(b)]
-    except StopIteration:
-        return iter([])
-
-    if initial is not None:
-        first = []
-
-    return chain(first, starmap(func, zip(b, a)))
-
-
-class SequenceView(Sequence):
-    """Return a read-only view of the sequence object *target*.
-
-    :class:`SequenceView` objects are analogous to Python's built-in
-    "dictionary view" types. They provide a dynamic view of a sequence's items,
-    meaning that when the sequence updates, so does the view.
-
-        >>> seq = ['0', '1', '2']
-        >>> view = SequenceView(seq)
-        >>> view
-        SequenceView(['0', '1', '2'])
-        >>> seq.append('3')
-        >>> view
-        SequenceView(['0', '1', '2', '3'])
-
-    Sequence views support indexing, slicing, and length queries. They act
-    like the underlying sequence, except they don't allow assignment:
-
-        >>> view[1]
-        '1'
-        >>> view[1:-1]
-        ['1', '2']
-        >>> len(view)
-        4
-
-    Sequence views are useful as an alternative to copying, as they don't
-    require (much) extra storage.
-
-    """
-
-    def __init__(self, target):
-        if not isinstance(target, Sequence):
-            raise TypeError
-        self._target = target
-
-    def __getitem__(self, index):
-        return self._target[index]
-
-    def __len__(self):
-        return len(self._target)
-
-    def __repr__(self):
-        return '{}({})'.format(self.__class__.__name__, repr(self._target))
-
-
-class seekable:
-    """Wrap an iterator to allow for seeking backward and forward. This
-    progressively caches the items in the source iterable so they can be
-    re-visited.
-
-    Call :meth:`seek` with an index to seek to that position in the source
-    iterable.
-
-    To "reset" an iterator, seek to ``0``:
-
-        >>> from itertools import count
-        >>> it = seekable((str(n) for n in count()))
-        >>> next(it), next(it), next(it)
-        ('0', '1', '2')
-        >>> it.seek(0)
-        >>> next(it), next(it), next(it)
-        ('0', '1', '2')
-        >>> next(it)
-        '3'
-
-    You can also seek forward:
-
-        >>> it = seekable((str(n) for n in range(20)))
-        >>> it.seek(10)
-        >>> next(it)
-        '10'
-        >>> it.seek(20)  # Seeking past the end of the source isn't a problem
-        >>> list(it)
-        []
-        >>> it.seek(0)  # Resetting works even after hitting the end
-        >>> next(it), next(it), next(it)
-        ('0', '1', '2')
-
-    Call :meth:`peek` to look ahead one item without advancing the iterator:
-
-        >>> it = seekable('1234')
-        >>> it.peek()
-        '1'
-        >>> list(it)
-        ['1', '2', '3', '4']
-        >>> it.peek(default='empty')
-        'empty'
-
-    Before the iterator reaches its end, calling :func:`bool` on it will
-    return ``True``. Afterward, it will return ``False``:
-
-        >>> it = seekable('5678')
-        >>> bool(it)
-        True
-        >>> list(it)
-        ['5', '6', '7', '8']
-        >>> bool(it)
-        False
-
-    You may view the contents of the cache with the :meth:`elements` method.
-    That returns a :class:`SequenceView`, a view that updates automatically:
-
-        >>> it = seekable((str(n) for n in range(10)))
-        >>> next(it), next(it), next(it)
-        ('0', '1', '2')
-        >>> elements = it.elements()
-        >>> elements
-        SequenceView(['0', '1', '2'])
-        >>> next(it)
-        '3'
-        >>> elements
-        SequenceView(['0', '1', '2', '3'])
-
-    By default, the cache grows as the source iterable progresses, so beware of
-    wrapping very large or infinite iterables. Supply *maxlen* to limit the
-    size of the cache (this of course limits how far back you can seek).
-
-        >>> from itertools import count
-        >>> it = seekable((str(n) for n in count()), maxlen=2)
-        >>> next(it), next(it), next(it), next(it)
-        ('0', '1', '2', '3')
-        >>> list(it.elements())
-        ['2', '3']
-        >>> it.seek(0)
-        >>> next(it), next(it), next(it), next(it)
-        ('2', '3', '4', '5')
-        >>> next(it)
-        '6'
-
-    """
-
-    def __init__(self, iterable, maxlen=None):
-        self._source = iter(iterable)
-        if maxlen is None:
-            self._cache = []
-        else:
-            self._cache = deque([], maxlen)
-        self._index = None
-
-    def __iter__(self):
-        return self
-
-    def __next__(self):
-        if self._index is not None:
-            try:
-                item = self._cache[self._index]
-            except IndexError:
-                self._index = None
-            else:
-                self._index += 1
-                return item
-
-        item = next(self._source)
-        self._cache.append(item)
-        return item
-
-    def __bool__(self):
-        try:
-            self.peek()
-        except StopIteration:
-            return False
-        return True
-
-    def peek(self, default=_marker):
-        try:
-            peeked = next(self)
-        except StopIteration:
-            if default is _marker:
-                raise
-            return default
-        if self._index is None:
-            self._index = len(self._cache)
-        self._index -= 1
-        return peeked
-
-    def elements(self):
-        return SequenceView(self._cache)
-
-    def seek(self, index):
-        self._index = index
-        remainder = index - len(self._cache)
-        if remainder > 0:
-            consume(self, remainder)
-
-
-class run_length:
-    """
-    :func:`run_length.encode` compresses an iterable with run-length encoding.
-    It yields groups of repeated items with the count of how many times they
-    were repeated:
-
-        >>> uncompressed = 'abbcccdddd'
-        >>> list(run_length.encode(uncompressed))
-        [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
-
-    :func:`run_length.decode` decompresses an iterable that was previously
-    compressed with run-length encoding. It yields the items of the
-    decompressed iterable:
-
-        >>> compressed = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
-        >>> list(run_length.decode(compressed))
-        ['a', 'b', 'b', 'c', 'c', 'c', 'd', 'd', 'd', 'd']
-
-    """
-
-    @staticmethod
-    def encode(iterable):
-        return ((k, ilen(g)) for k, g in groupby(iterable))
-
-    @staticmethod
-    def decode(iterable):
-        return chain.from_iterable(repeat(k, n) for k, n in iterable)
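-
-# A round-trip sketch: decode inverts encode for any finite iterable.
-#
-#     >>> s = 'aabbbbc'
-#     >>> ''.join(run_length.decode(run_length.encode(s)))
-#     'aabbbbc'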
-
-
-def exactly_n(iterable, n, predicate=bool):
-    """Return ``True`` if exactly ``n`` items in the iterable are ``True``
-    according to the *predicate* function.
-
-        >>> exactly_n([True, True, False], 2)
-        True
-        >>> exactly_n([True, True, False], 1)
-        False
-        >>> exactly_n([0, 1, 2, 3, 4, 5], 3, lambda x: x < 3)
-        True
-
-    The iterable will be advanced until ``n + 1`` truthy items are encountered,
-    so avoid calling it on infinite iterables.
-
-    """
-    return len(take(n + 1, filter(predicate, iterable))) == n
-
-
-def circular_shifts(iterable):
-    """Return a list of circular shifts of *iterable*.
-
-    >>> circular_shifts(range(4))
-    [(0, 1, 2, 3), (1, 2, 3, 0), (2, 3, 0, 1), (3, 0, 1, 2)]
-    """
-    lst = list(iterable)
-    return take(len(lst), windowed(cycle(lst), len(lst)))
-
-
-def make_decorator(wrapping_func, result_index=0):
-    """Return a decorator version of *wrapping_func*, which is a function that
-    modifies an iterable. *result_index* is the position in that function's
-    signature where the iterable goes.
-
-    This lets you use itertools on the "production end," i.e. at function
-    definition. This can augment what the function returns without changing the
-    function's code.
-
-    For example, to produce a decorator version of :func:`chunked`:
-
-        >>> from more_itertools import chunked
-        >>> chunker = make_decorator(chunked, result_index=0)
-        >>> @chunker(3)
-        ... def iter_range(n):
-        ...     return iter(range(n))
-        ...
-        >>> list(iter_range(9))
-        [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
-
-    To only allow truthy items to be returned:
-
-        >>> truth_serum = make_decorator(filter, result_index=1)
-        >>> @truth_serum(bool)
-        ... def boolean_test():
-        ...     return [0, 1, '', ' ', False, True]
-        ...
-        >>> list(boolean_test())
-        [1, ' ', True]
-
-    The :func:`peekable` and :func:`seekable` wrappers make for practical
-    decorators:
-
-        >>> from more_itertools import peekable
-        >>> peekable_function = make_decorator(peekable)
-        >>> @peekable_function()
-        ... def str_range(*args):
-        ...     return (str(x) for x in range(*args))
-        ...
-        >>> it = str_range(1, 20, 2)
-        >>> next(it), next(it), next(it)
-        ('1', '3', '5')
-        >>> it.peek()
-        '7'
-        >>> next(it)
-        '7'
-
-    """
-    # See https://sites.google.com/site/bbayles/index/decorator_factory for
-    # notes on how this works.
-    def decorator(*wrapping_args, **wrapping_kwargs):
-        def outer_wrapper(f):
-            def inner_wrapper(*args, **kwargs):
-                result = f(*args, **kwargs)
-                wrapping_args_ = list(wrapping_args)
-                wrapping_args_.insert(result_index, result)
-                return wrapping_func(*wrapping_args_, **wrapping_kwargs)
-
-            return inner_wrapper
-
-        return outer_wrapper
-
-    return decorator
-
-
-def map_reduce(iterable, keyfunc, valuefunc=None, reducefunc=None):
-    """Return a dictionary that maps the items in *iterable* to categories
-    defined by *keyfunc*, transforms them with *valuefunc*, and
-    then summarizes them by category with *reducefunc*.
-
-    *valuefunc* defaults to the identity function if it is unspecified.
-    If *reducefunc* is unspecified, no summarization takes place:
-
-        >>> keyfunc = lambda x: x.upper()
-        >>> result = map_reduce('abbccc', keyfunc)
-        >>> sorted(result.items())
-        [('A', ['a']), ('B', ['b', 'b']), ('C', ['c', 'c', 'c'])]
-
-    Specifying *valuefunc* transforms the categorized items:
-
-        >>> keyfunc = lambda x: x.upper()
-        >>> valuefunc = lambda x: 1
-        >>> result = map_reduce('abbccc', keyfunc, valuefunc)
-        >>> sorted(result.items())
-        [('A', [1]), ('B', [1, 1]), ('C', [1, 1, 1])]
-
-    Specifying *reducefunc* summarizes the categorized items:
-
-        >>> keyfunc = lambda x: x.upper()
-        >>> valuefunc = lambda x: 1
-        >>> reducefunc = sum
-        >>> result = map_reduce('abbccc', keyfunc, valuefunc, reducefunc)
-        >>> sorted(result.items())
-        [('A', 1), ('B', 2), ('C', 3)]
-
-    You may want to filter the input iterable before applying the map/reduce
-    procedure:
-
-        >>> all_items = range(30)
-        >>> items = [x for x in all_items if 10 <= x <= 20]  # Filter
-        >>> keyfunc = lambda x: x % 2  # Evens map to 0; odds to 1
-        >>> categories = map_reduce(items, keyfunc=keyfunc)
-        >>> sorted(categories.items())
-        [(0, [10, 12, 14, 16, 18, 20]), (1, [11, 13, 15, 17, 19])]
-        >>> summaries = map_reduce(items, keyfunc=keyfunc, reducefunc=sum)
-        >>> sorted(summaries.items())
-        [(0, 90), (1, 75)]
-
-    Note that all items in the iterable are gathered into a list before the
-    summarization step, which may require significant storage.
-
-    The returned object is a :obj:`collections.defaultdict` with the
-    ``default_factory`` set to ``None``, such that it behaves like a normal
-    dictionary.
-
-    """
-    valuefunc = (lambda x: x) if (valuefunc is None) else valuefunc
-
-    ret = defaultdict(list)
-    for item in iterable:
-        key = keyfunc(item)
-        value = valuefunc(item)
-        ret[key].append(value)
-
-    if reducefunc is not None:
-        for key, value_list in ret.items():
-            ret[key] = reducefunc(value_list)
-
-    ret.default_factory = None
-    return ret
-
-
-def rlocate(iterable, pred=bool, window_size=None):
-    """Yield the index of each item in *iterable* for which *pred* returns
-    ``True``, starting from the right and moving left.
-
-    *pred* defaults to :func:`bool`, which will select truthy items:
-
-        >>> list(rlocate([0, 1, 1, 0, 1, 0, 0]))  # Truthy at 1, 2, and 4
-        [4, 2, 1]
-
-    Set *pred* to a custom function to, e.g., find the indexes for a particular
-    item:
-
-        >>> iterable = iter('abcb')
-        >>> pred = lambda x: x == 'b'
-        >>> list(rlocate(iterable, pred))
-        [3, 1]
-
-    If *window_size* is given, then the *pred* function will be called with
-    that many items. This enables searching for sub-sequences:
-
-        >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
-        >>> pred = lambda *args: args == (1, 2, 3)
-        >>> list(rlocate(iterable, pred=pred, window_size=3))
-        [9, 5, 1]
-
-    Beware, this function won't return anything for infinite iterables.
-    If *iterable* is reversible, ``rlocate`` will reverse it and search from
-    the right. Otherwise, it will search from the left and return the results
-    in reverse order.
-
-    See :func:`locate` for other example applications.
-
-    """
-    if window_size is None:
-        try:
-            len_iter = len(iterable)
-            return (len_iter - i - 1 for i in locate(reversed(iterable), pred))
-        except TypeError:
-            pass
-
-    return reversed(list(locate(iterable, pred, window_size)))
-
-
-def replace(iterable, pred, substitutes, count=None, window_size=1):
-    """Yield the items from *iterable*, replacing the items for which *pred*
-    returns ``True`` with the items from the iterable *substitutes*.
-
-        >>> iterable = [1, 1, 0, 1, 1, 0, 1, 1]
-        >>> pred = lambda x: x == 0
-        >>> substitutes = (2, 3)
-        >>> list(replace(iterable, pred, substitutes))
-        [1, 1, 2, 3, 1, 1, 2, 3, 1, 1]
-
-    If *count* is given, the number of replacements will be limited:
-
-        >>> iterable = [1, 1, 0, 1, 1, 0, 1, 1, 0]
-        >>> pred = lambda x: x == 0
-        >>> substitutes = [None]
-        >>> list(replace(iterable, pred, substitutes, count=2))
-        [1, 1, None, 1, 1, None, 1, 1, 0]
-
-    Use *window_size* to control the number of items passed as arguments to
-    *pred*. This allows for locating and replacing subsequences.
-
-        >>> iterable = [0, 1, 2, 5, 0, 1, 2, 5]
-        >>> window_size = 3
-        >>> pred = lambda *args: args == (0, 1, 2)  # 3 items passed to pred
-        >>> substitutes = [3, 4] # Splice in these items
-        >>> list(replace(iterable, pred, substitutes, window_size=window_size))
-        [3, 4, 5, 3, 4, 5]
-
-    """
-    if window_size < 1:
-        raise ValueError('window_size must be at least 1')
-
-    # Save the substitutes iterable, since it's used more than once
-    substitutes = tuple(substitutes)
-
-    # Add padding such that the number of windows matches the length of the
-    # iterable
-    it = chain(iterable, [_marker] * (window_size - 1))
-    windows = windowed(it, window_size)
-
-    n = 0
-    for w in windows:
-        # If the current window matches our predicate (and we haven't hit
-        # our maximum number of replacements), splice in the substitutes
-        # and then consume the following windows that overlap with this one.
-        # For example, if the iterable is (0, 1, 2, 3, 4...)
-        # and the window size is 2, we have (0, 1), (1, 2), (2, 3)...
-        # If the predicate matches on (0, 1), we need to zap (0, 1) and (1, 2)
-        if pred(*w):
-            if (count is None) or (n < count):
-                n += 1
-                yield from substitutes
-                consume(windows, window_size - 1)
-                continue
-
-        # If there was no match (or we've reached the replacement limit),
-        # yield the first item from the window.
-        if w and (w[0] is not _marker):
-            yield w[0]
-
-
-def partitions(iterable):
-    """Yield all possible order-preserving partitions of *iterable*.
-
-    >>> iterable = 'abc'
-    >>> for part in partitions(iterable):
-    ...     print([''.join(p) for p in part])
-    ['abc']
-    ['a', 'bc']
-    ['ab', 'c']
-    ['a', 'b', 'c']
-
-    This is unrelated to :func:`partition`.
-
-    """
-    sequence = list(iterable)
-    n = len(sequence)
-    for i in powerset(range(1, n)):
-        yield [sequence[i:j] for i, j in zip((0,) + i, i + (n,))]
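-
-# Each element of powerset(range(1, n)) above acts as a tuple of cut
-# positions. A sketch for 'abc': the cut tuple (1,) is padded to the pairs
-# (0, 1) and (1, 3), which slice out the parts.
-#
-#     >>> sequence = list('abc')
-#     >>> cuts = (1,)
-#     >>> [sequence[i:j] for i, j in zip((0,) + cuts, cuts + (3,))]
-#     [['a'], ['b', 'c']]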
-
-
-def set_partitions(iterable, k=None):
-    """
-    Yield the set partitions of *iterable* into *k* parts. Set partitions are
-    not order-preserving.
-
-    >>> iterable = 'abc'
-    >>> for part in set_partitions(iterable, 2):
-    ...     print([''.join(p) for p in part])
-    ['a', 'bc']
-    ['ab', 'c']
-    ['b', 'ac']
-
-
-    If *k* is not given, every set partition is generated.
-
-    >>> iterable = 'abc'
-    >>> for part in set_partitions(iterable):
-    ...     print([''.join(p) for p in part])
-    ['abc']
-    ['a', 'bc']
-    ['ab', 'c']
-    ['b', 'ac']
-    ['a', 'b', 'c']
-
-    """
-    L = list(iterable)
-    n = len(L)
-    if k is not None:
-        if k < 1:
-            raise ValueError(
-                "Can't partition in a negative or zero number of groups"
-            )
-        elif k > n:
-            return
-
-    def set_partitions_helper(L, k):
-        n = len(L)
-        if k == 1:
-            yield [L]
-        elif n == k:
-            yield [[s] for s in L]
-        else:
-            e, *M = L
-            for p in set_partitions_helper(M, k - 1):
-                yield [[e], *p]
-            for p in set_partitions_helper(M, k):
-                for i in range(len(p)):
-                    yield p[:i] + [[e] + p[i]] + p[i + 1 :]
-
-    if k is None:
-        for k in range(1, n + 1):
-            yield from set_partitions_helper(L, k)
-    else:
-        yield from set_partitions_helper(L, k)
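-
-# A sanity-check sketch: the number of partitions of an n-element set into k
-# parts is the Stirling number of the second kind, e.g. S(4, 2) == 7.
-#
-#     >>> sum(1 for _ in set_partitions(range(4), 2))
-#     7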
-
-
-class time_limited:
-    """
-    Yield items from *iterable* until *limit_seconds* have passed.
-    If the time limit expires before all items have been yielded, the
-    ``timed_out`` parameter will be set to ``True``.
-
-    >>> from time import sleep
-    >>> def generator():
-    ...     yield 1
-    ...     yield 2
-    ...     sleep(0.2)
-    ...     yield 3
-    >>> iterable = time_limited(0.1, generator())
-    >>> list(iterable)
-    [1, 2]
-    >>> iterable.timed_out
-    True
-
-    Note that the time is checked before each item is yielded, and iteration
-    stops if the time elapsed is greater than *limit_seconds*. If your time
-    limit is 1 second, but it takes 2 seconds to generate the first item from
-    the iterable, the function will run for 2 seconds and not yield anything.
-
-    """
-
-    def __init__(self, limit_seconds, iterable):
-        if limit_seconds < 0:
-            raise ValueError('limit_seconds must be non-negative')
-        self.limit_seconds = limit_seconds
-        self._iterable = iter(iterable)
-        self._start_time = monotonic()
-        self.timed_out = False
-
-    def __iter__(self):
-        return self
-
-    def __next__(self):
-        item = next(self._iterable)
-        if monotonic() - self._start_time > self.limit_seconds:
-            self.timed_out = True
-            raise StopIteration
-
-        return item
-
-
-def only(iterable, default=None, too_long=None):
-    """If *iterable* has only one item, return it.
-    If it has zero items, return *default*.
-    If it has more than one item, raise the exception given by *too_long*,
-    which is ``ValueError`` by default.
-
-    >>> only([], default='missing')
-    'missing'
-    >>> only([1])
-    1
-    >>> only([1, 2])  # doctest: +IGNORE_EXCEPTION_DETAIL
-    Traceback (most recent call last):
-    ...
-    ValueError: Expected exactly one item in iterable, but got 1, 2,
-     and perhaps more.
-    >>> only([1, 2], too_long=TypeError)  # doctest: +IGNORE_EXCEPTION_DETAIL
-    Traceback (most recent call last):
-    ...
-    TypeError
-
-    Note that :func:`only` attempts to advance *iterable* twice to ensure there
-    is only one item. See :func:`spy` or :func:`peekable` to check
-    iterable contents less destructively.
-    """
-    it = iter(iterable)
-    first_value = next(it, default)
-
-    try:
-        second_value = next(it)
-    except StopIteration:
-        pass
-    else:
-        msg = (
-            'Expected exactly one item in iterable, but got {!r}, {!r}, '
-            'and perhaps more.'.format(first_value, second_value)
-        )
-        raise too_long or ValueError(msg)
-
-    return first_value
-
-
-def ichunked(iterable, n):
-    """Break *iterable* into sub-iterables with *n* elements each.
-    :func:`ichunked` is like :func:`chunked`, but it yields iterables
-    instead of lists.
-
-    If the sub-iterables are read in order, the elements of *iterable*
-    won't be stored in memory.
-    If they are read out of order, :func:`itertools.tee` is used to cache
-    elements as necessary.
-
-    >>> from itertools import count
-    >>> all_chunks = ichunked(count(), 4)
-    >>> c_1, c_2, c_3 = next(all_chunks), next(all_chunks), next(all_chunks)
-    >>> list(c_2)  # c_1's elements have been cached; c_3's haven't been
-    [4, 5, 6, 7]
-    >>> list(c_1)
-    [0, 1, 2, 3]
-    >>> list(c_3)
-    [8, 9, 10, 11]
-
-    """
-    source = iter(iterable)
-
-    while True:
-        # Check to see whether we're at the end of the source iterable
-        item = next(source, _marker)
-        if item is _marker:
-            return
-
-        # Clone the source and yield an n-length slice
-        source, it = tee(chain([item], source))
-        yield islice(it, n)
-
-        # Advance the source iterable
-        consume(source, n)
-
-
-def distinct_combinations(iterable, r):
-    """Yield the distinct combinations of *r* items taken from *iterable*.
-
-        >>> list(distinct_combinations([0, 0, 1], 2))
-        [(0, 0), (0, 1)]
-
-    Equivalent to ``set(combinations(iterable, r))``, except duplicates are not
-    generated and thrown away. For larger input sequences this is much more
-    efficient.
-
-    """
-    if r < 0:
-        raise ValueError('r must be non-negative')
-    elif r == 0:
-        yield ()
-        return
-    pool = tuple(iterable)
-    generators = [unique_everseen(enumerate(pool), key=itemgetter(1))]
-    current_combo = [None] * r
-    level = 0
-    while generators:
-        try:
-            cur_idx, p = next(generators[-1])
-        except StopIteration:
-            generators.pop()
-            level -= 1
-            continue
-        current_combo[level] = p
-        if level + 1 == r:
-            yield tuple(current_combo)
-        else:
-            generators.append(
-                unique_everseen(
-                    enumerate(pool[cur_idx + 1 :], cur_idx + 1),
-                    key=itemgetter(1),
-                )
-            )
-            level += 1
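-
-# An equivalence sketch for the claim in the docstring: the same result set
-# as deduplicating after the fact, without generating the duplicates.
-#
-#     >>> from itertools import combinations
-#     >>> pool = [0, 0, 1, 1]
-#     >>> set(distinct_combinations(pool, 2)) == set(combinations(pool, 2))
-#     True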
-
-
-def filter_except(validator, iterable, *exceptions):
-    """Yield the items from *iterable* for which the *validator* function does
-    not raise one of the specified *exceptions*.
-
-    *validator* is called for each item in *iterable*.
-    It should be a function that accepts one argument and raises an exception
-    if that item is not valid.
-
-    >>> iterable = ['1', '2', 'three', '4', None]
-    >>> list(filter_except(int, iterable, ValueError, TypeError))
-    ['1', '2', '4']
-
-    If an exception other than one given by *exceptions* is raised by
-    *validator*, it is raised like normal.
-    """
-    for item in iterable:
-        try:
-            validator(item)
-        except exceptions:
-            pass
-        else:
-            yield item
-
-
-def map_except(function, iterable, *exceptions):
-    """Transform each item from *iterable* with *function* and yield the
-    result, unless *function* raises one of the specified *exceptions*.
-
-    *function* is called to transform each item in *iterable*.
-    It should accept one argument.
-
-    >>> iterable = ['1', '2', 'three', '4', None]
-    >>> list(map_except(int, iterable, ValueError, TypeError))
-    [1, 2, 4]
-
-    If an exception other than one given by *exceptions* is raised by
-    *function*, it is raised like normal.
-    """
-    for item in iterable:
-        try:
-            yield function(item)
-        except exceptions:
-            pass
-
-
-def map_if(iterable, pred, func, func_else=lambda x: x):
-    """Evaluate each item from *iterable* using *pred*. If the result is
-    equivalent to ``True``, transform the item with *func* and yield it.
-    Otherwise, transform the item with *func_else* and yield it.
-
-    *pred*, *func*, and *func_else* should each be functions that accept
-    one argument. By default, *func_else* is the identity function.
-
-    >>> from math import sqrt
-    >>> iterable = list(range(-5, 5))
-    >>> iterable
-    [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
-    >>> list(map_if(iterable, lambda x: x > 3, lambda x: 'toobig'))
-    [-5, -4, -3, -2, -1, 0, 1, 2, 3, 'toobig']
-    >>> list(map_if(iterable, lambda x: x >= 0,
-    ... lambda x: f'{sqrt(x):.2f}', lambda x: None))
-    [None, None, None, None, None, '0.00', '1.00', '1.41', '1.73', '2.00']
-    """
-    for item in iterable:
-        yield func(item) if pred(item) else func_else(item)
-
-
-def _sample_unweighted(iterable, k):
-    # Implementation of "Algorithm L" from the 1994 paper by Kim-Hung Li:
-    # "Reservoir-Sampling Algorithms of Time Complexity O(n(1+log(N/n)))".
-
-    # Fill up the reservoir (collection of samples) with the first `k` samples
-    reservoir = take(k, iterable)
-
-    # Generate a random number: the largest in a sample of k U(0,1) numbers
-    # Largest order statistic: https://en.wikipedia.org/wiki/Order_statistic
-    W = exp(log(random()) / k)
-
-    # The number of elements to skip before changing the reservoir is a random
-    # number with a geometric distribution. Sample it using random() and logs.
-    next_index = k + floor(log(random()) / log(1 - W))
-
-    for index, element in enumerate(iterable, k):
-
-        if index == next_index:
-            reservoir[randrange(k)] = element
-            # The new W is the largest in a sample of k U(0, `old_W`) numbers
-            W *= exp(log(random()) / k)
-            next_index += floor(log(random()) / log(1 - W)) + 1
-
-    return reservoir
-
-
-def _sample_weighted(iterable, k, weights):
-    # Implementation of "A-ExpJ" from the 2006 paper by Efraimidis et al. :
-    # "Weighted random sampling with a reservoir".
-
-    # Log-transform for numerical stability for weights that are small/large
-    weight_keys = (log(random()) / weight for weight in weights)
-
-    # Fill up the reservoir (collection of samples) with the first `k`
-    # weight-keys and elements, then heapify the list.
-    reservoir = take(k, zip(weight_keys, iterable))
-    heapify(reservoir)
-
-    # The number of jumps before changing the reservoir is a random variable
-    # with an exponential distribution. Sample it using random() and logs.
-    smallest_weight_key, _ = reservoir[0]
-    weights_to_skip = log(random()) / smallest_weight_key
-
-    for weight, element in zip(weights, iterable):
-        if weight >= weights_to_skip:
-            # The notation here is consistent with the paper, but we store
-            # the weight-keys in log-space for better numerical stability.
-            smallest_weight_key, _ = reservoir[0]
-            t_w = exp(weight * smallest_weight_key)
-            r_2 = uniform(t_w, 1)  # generate U(t_w, 1)
-            weight_key = log(r_2) / weight
-            heapreplace(reservoir, (weight_key, element))
-            smallest_weight_key, _ = reservoir[0]
-            weights_to_skip = log(random()) / smallest_weight_key
-        else:
-            weights_to_skip -= weight
-
-    # Equivalent to [element for weight_key, element in sorted(reservoir)]
-    return [heappop(reservoir)[1] for _ in range(k)]
-
-
-def sample(iterable, k, weights=None):
-    """Return a *k*-length list of elements chosen (without replacement)
-    from the *iterable*. Like :func:`random.sample`, but works on iterables
-    of unknown length.
-
-    >>> iterable = range(100)
-    >>> sample(iterable, 5)  # doctest: +SKIP
-    [81, 60, 96, 16, 4]
-
-    An iterable with *weights* may also be given:
-
-    >>> iterable = range(100)
-    >>> weights = (i * i + 1 for i in range(100))
-    >>> sample(iterable, 5, weights=weights)  # doctest: +SKIP
-    [79, 67, 74, 66, 78]
-
-    The algorithm can also be used to generate weighted random permutations.
-    The relative weight of each item determines the probability that it
-    appears late in the permutation.
-
-    >>> data = "abcdefgh"
-    >>> weights = range(1, len(data) + 1)
-    >>> sample(data, k=len(data), weights=weights)  # doctest: +SKIP
-    ['c', 'a', 'b', 'e', 'g', 'd', 'h', 'f']
-    """
-    if k == 0:
-        return []
-
-    iterable = iter(iterable)
-    if weights is None:
-        return _sample_unweighted(iterable, k)
-    else:
-        weights = iter(weights)
-        return _sample_weighted(iterable, k, weights)
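-
-
-# Hedged usage sketch (illustrative, not part of more_itertools): with
-# weights w_i, heavier elements should appear in the sample more often.
-# `_demo_sample_weights` is a hypothetical helper assuming `sample` above.
-def _demo_sample_weights(trials=10_000):
-    from collections import Counter
-
-    counts = Counter()
-    for _ in range(trials):
-        # element 3 carries ten times the weight of the others
-        counts.update(sample(range(4), k=2, weights=[1, 1, 1, 10]))
-    return counts.most_common()  # 3 should dominate the counts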
-
-
-def is_sorted(iterable, key=None, reverse=False, strict=False):
-    """Returns ``True`` if the items of iterable are in sorted order, and
-    ``False`` otherwise. *key* and *reverse* have the same meaning that they do
-    in the built-in :func:`sorted` function.
-
-    >>> is_sorted(['1', '2', '3', '4', '5'], key=int)
-    True
-    >>> is_sorted([5, 4, 3, 1, 2], reverse=True)
-    False
-
-    If *strict*, tests for strict sorting, that is, returns ``False`` if equal
-    elements are found:
-
-    >>> is_sorted([1, 2, 2])
-    True
-    >>> is_sorted([1, 2, 2], strict=True)
-    False
-
-    The function returns ``False`` after encountering the first out-of-order
-    item. If there are no out-of-order items, the iterable is exhausted.
-    """
-
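-    # Sortedness is tested by hunting for a *violating* adjacent pair:
-    # ascending order is violated by a > b (gt), strictly ascending by
-    # a >= b (ge); with reverse=True the operators flip to lt/le.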
-    compare = (le if reverse else ge) if strict else (lt if reverse else gt)
-    it = iterable if key is None else map(key, iterable)
-    return not any(starmap(compare, pairwise(it)))
-
-
-class AbortThread(BaseException):
-    pass
-
-
-class callback_iter:
-    """Convert a function that uses callbacks to an iterator.
-
-    Let *func* be a function that takes a `callback` keyword argument.
-    For example:
-
-    >>> def func(callback=None):
-    ...     for i, c in [(1, 'a'), (2, 'b'), (3, 'c')]:
-    ...         if callback:
-    ...             callback(i, c)
-    ...     return 4
-
-    Use ``with callback_iter(func)`` to get an iterator over the parameters
-    that are delivered to the callback.
-
-    >>> with callback_iter(func) as it:
-    ...     for args, kwargs in it:
-    ...         print(args)
-    (1, 'a')
-    (2, 'b')
-    (3, 'c')
-
-    The function will be called in a background thread. The ``done`` property
-    indicates whether it has completed execution.
-
-    >>> it.done
-    True
-
-    If it completes successfully, its return value will be available
-    in the ``result`` property.
-
-    >>> it.result
-    4
-
-    Notes:
-
-    * If the function uses some keyword argument besides ``callback``, supply
-      *callback_kwd*.
-    * If it finished executing, but raised an exception, accessing the
-      ``result`` property will raise the same exception.
-    * If it hasn't finished executing, accessing the ``result``
-      property from within the ``with`` block will raise ``RuntimeError``.
-    * If it hasn't finished executing, accessing the ``result`` property from
-      outside the ``with`` block will raise a
-      ``more_itertools.AbortThread`` exception.
-    * Provide *wait_seconds* to adjust how frequently it is polled for
-      output.
-
-    """
-
-    def __init__(self, func, callback_kwd='callback', wait_seconds=0.1):
-        self._func = func
-        self._callback_kwd = callback_kwd
-        self._aborted = False
-        self._future = None
-        self._wait_seconds = wait_seconds
-        from concurrent.futures import ThreadPoolExecutor
-        self._executor = ThreadPoolExecutor(max_workers=1)
-        self._iterator = self._reader()
-
-    def __enter__(self):
-        return self
-
-    def __exit__(self, exc_type, exc_value, traceback):
-        self._aborted = True
-        self._executor.shutdown()
-
-    def __iter__(self):
-        return self
-
-    def __next__(self):
-        return next(self._iterator)
-
-    @property
-    def done(self):
-        if self._future is None:
-            return False
-        return self._future.done()
-
-    @property
-    def result(self):
-        if not self.done:
-            raise RuntimeError('Function has not yet completed')
-
-        return self._future.result()
-
-    def _reader(self):
-        q = Queue()
-
-        def callback(*args, **kwargs):
-            if self._aborted:
-                raise AbortThread('canceled by user')
-
-            q.put((args, kwargs))
-
-        self._future = self._executor.submit(
-            self._func, **{self._callback_kwd: callback}
-        )
-
-        while True:
-            try:
-                item = q.get(timeout=self._wait_seconds)
-            except Empty:
-                pass
-            else:
-                q.task_done()
-                yield item
-
-            if self._future.done():
-                break
-
-        remaining = []
-        while True:
-            try:
-                item = q.get_nowait()
-            except Empty:
-                break
-            else:
-                q.task_done()
-                remaining.append(item)
-        q.join()
-        yield from remaining
-
-
-def windowed_complete(iterable, n):
-    """
-    Yield ``(beginning, middle, end)`` tuples, where:
-
-    * Each ``middle`` has *n* items from *iterable*
-    * Each ``beginning`` has the items before the ones in ``middle``
-    * Each ``end`` has the items after the ones in ``middle``
-
-    >>> iterable = range(7)
-    >>> n = 3
-    >>> for beginning, middle, end in windowed_complete(iterable, n):
-    ...     print(beginning, middle, end)
-    () (0, 1, 2) (3, 4, 5, 6)
-    (0,) (1, 2, 3) (4, 5, 6)
-    (0, 1) (2, 3, 4) (5, 6)
-    (0, 1, 2) (3, 4, 5) (6,)
-    (0, 1, 2, 3) (4, 5, 6) ()
-
-    Note that *n* must be at least 0 and at most equal to the length of
-    *iterable*.
-
-    This function will exhaust the iterable and may require significant
-    storage.
-    """
-    if n < 0:
-        raise ValueError('n must be >= 0')
-
-    seq = tuple(iterable)
-    size = len(seq)
-
-    if n > size:
-        raise ValueError('n must be <= len(seq)')
-
-    for i in range(size - n + 1):
-        beginning = seq[:i]
-        middle = seq[i : i + n]
-        end = seq[i + n :]
-        yield beginning, middle, end
-
-
-def all_unique(iterable, key=None):
-    """
-    Returns ``True`` if all the elements of *iterable* are unique (no two
-    elements are equal).
-
-        >>> all_unique('ABCB')
-        False
-
-    If a *key* function is specified, it will be used to make comparisons.
-
-        >>> all_unique('ABCb')
-        True
-        >>> all_unique('ABCb', str.lower)
-        False
-
-    The function returns as soon as the first non-unique element is
-    encountered. Iterables with a mix of hashable and unhashable items can
-    be used, but the function will be slower for unhashable items.
-    """
-    seenset = set()
-    seenset_add = seenset.add
-    seenlist = []
-    seenlist_add = seenlist.append
-    for element in map(key, iterable) if key else iterable:
-        try:
-            if element in seenset:
-                return False
-            seenset_add(element)
-        except TypeError:
-            if element in seenlist:
-                return False
-            seenlist_add(element)
-    return True
-
-
-def nth_product(index, *args):
-    """Equivalent to ``list(product(*args))[index]``.
-
-    The products of *args* can be ordered lexicographically.
-    :func:`nth_product` computes the product at sort position *index* without
-    computing the previous products.
-
-        >>> nth_product(8, range(2), range(2), range(2), range(2))
-        (1, 0, 0, 0)
-
-    ``IndexError`` will be raised if the given *index* is invalid.
-    """
-    pools = list(map(tuple, reversed(args)))
-    ns = list(map(len, pools))
-
-    c = reduce(mul, ns)
-
-    if index < 0:
-        index += c
-
-    if not 0 <= index < c:
-        raise IndexError
-
-    result = []
-    for pool, n in zip(pools, ns):
-        result.append(pool[index % n])
-        index //= n
-
-    return tuple(reversed(result))
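-
-
-# Hedged sanity check (illustrative, not part of more_itertools): the
-# mixed-radix decomposition above should agree with materializing the full
-# product. `_check_nth_product` is a hypothetical helper.
-def _check_nth_product():
-    from itertools import product
-
-    args = (range(2), range(3), range(4))
-    full = list(product(*args))
-    return all(nth_product(i, *args) == full[i] for i in range(len(full)))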
-
-
-def nth_permutation(iterable, r, index):
-    """Equivalent to ``list(permutations(iterable, r))[index]```
-
-    The subsequences of *iterable* that are of length *r* where order is
-    important can be ordered lexicographically. :func:`nth_permutation`
-    computes the subsequence at sort position *index* directly, without
-    computing the previous subsequences.
-
-        >>> nth_permutation('ghijk', 2, 5)
-        ('h', 'i')
-
-    ``ValueError`` will be raised if *r* is negative or greater than the length
-    of *iterable*.
-    ``IndexError`` will be raised if the given *index* is invalid.
-    """
-    pool = list(iterable)
-    n = len(pool)
-
-    if r is None or r == n:
-        r, c = n, factorial(n)
-    elif not 0 <= r < n:
-        raise ValueError
-    else:
-        c = factorial(n) // factorial(n - r)
-
-    if index < 0:
-        index += c
-
-    if not 0 <= index < c:
-        raise IndexError
-
-    if c == 0:
-        return tuple()
-
-    result = [0] * r
-    q = index * factorial(n) // c if r < n else index
-    for d in range(1, n + 1):
-        q, i = divmod(q, d)
-        if 0 <= n - d < r:
-            result[n - d] = i
-        if q == 0:
-            break
-
-    return tuple(map(pool.pop, result))
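-
-
-# Hedged sanity check (illustrative only): the factorial-number-system
-# decomposition above should match itertools.permutations.
-# `_check_nth_permutation` is a hypothetical helper.
-def _check_nth_permutation():
-    from itertools import permutations
-
-    full = list(permutations('ghijk', 2))
-    return all(
-        nth_permutation('ghijk', 2, i) == full[i] for i in range(len(full))
-    )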
-
-
-def value_chain(*args):
-    """Yield all arguments passed to the function in the same order in which
-    they were passed. If an argument itself is iterable then iterate over its
-    values.
-
-        >>> list(value_chain(1, 2, 3, [4, 5, 6]))
-        [1, 2, 3, 4, 5, 6]
-
-    Binary and text strings are not considered iterable and are emitted
-    as-is:
-
-        >>> list(value_chain('12', '34', ['56', '78']))
-        ['12', '34', '56', '78']
-
-    Multiple levels of nesting are not flattened.
-
-    """
-    for value in args:
-        if isinstance(value, (str, bytes)):
-            yield value
-            continue
-        try:
-            yield from value
-        except TypeError:
-            yield value
-
-
-def product_index(element, *args):
-    """Equivalent to ``list(product(*args)).index(element)``
-
-    The products of *args* can be ordered lexicographically.
-    :func:`product_index` computes the first index of *element* without
-    computing the previous products.
-
-        >>> product_index([8, 2], range(10), range(5))
-        42
-
-    ``ValueError`` will be raised if the given *element* isn't in the product
-    of *args*.
-    """
-    index = 0
-
-    for x, pool in zip_longest(element, args, fillvalue=_marker):
-        if x is _marker or pool is _marker:
-            raise ValueError('element is not a product of args')
-
-        pool = tuple(pool)
-        index = index * len(pool) + pool.index(x)
-
-    return index
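-
-
-# Hedged round-trip sketch (illustrative only): product_index inverts
-# nth_product, so composing the two should recover the index.
-# `_check_product_roundtrip` is a hypothetical helper.
-def _check_product_roundtrip():
-    args = (range(10), range(5))
-    return all(
-        product_index(nth_product(i, *args), *args) == i for i in range(50)
-    )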
-
-
-def combination_index(element, iterable):
-    """Equivalent to ``list(combinations(iterable, r)).index(element)``
-
-    The subsequences of *iterable* that are of length *r* can be ordered
-    lexicographically. :func:`combination_index` computes the index of the
-    first *element*, without computing the previous combinations.
-
-        >>> combination_index('adf', 'abcdefg')
-        10
-
-    ``ValueError`` will be raised if the given *element* isn't one of the
-    combinations of *iterable*.
-    """
-    element = enumerate(element)
-    k, y = next(element, (None, None))
-    if k is None:
-        return 0
-
-    indexes = []
-    pool = enumerate(iterable)
-    for n, x in pool:
-        if x == y:
-            indexes.append(n)
-            tmp, y = next(element, (None, None))
-            if tmp is None:
-                break
-            else:
-                k = tmp
-    else:
-        raise ValueError('element is not a combination of iterable')
-
-    n, _ = last(pool, default=(n, None))
-
-    # Python versions below 3.8 don't have math.comb
-    index = 1
-    for i, j in enumerate(reversed(indexes), start=1):
-        j = n - j
-        if i <= j:
-            index += factorial(j) // (factorial(i) * factorial(j - i))
-
-    return factorial(n + 1) // (factorial(k + 1) * factorial(n - k)) - index
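-
-
-# On Python 3.8+ the factorial arithmetic above can be written with
-# math.comb. A hedged equivalent of the final computation (illustrative,
-# not the vendored code); `_combination_index_tail` is hypothetical.
-def _combination_index_tail(indexes, n, k):
-    from math import comb
-
-    # comb(j, i) == factorial(j) // (factorial(i) * factorial(j - i))
-    index = 1 + sum(
-        comb(n - j, i)
-        for i, j in enumerate(reversed(indexes), start=1)
-        if i <= n - j
-    )
-    return comb(n + 1, k + 1) - index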
-
-
-def permutation_index(element, iterable):
-    """Equivalent to ``list(permutations(iterable, r)).index(element)```
-
-    The subsequences of *iterable* that are of length *r* where order is
-    important can be ordered lexicographically. :func:`permutation_index`
-    computes the index of the first *element* directly, without computing
-    the previous permutations.
-
-        >>> permutation_index([1, 3, 2], range(5))
-        19
-
-    ``ValueError`` will be raised if the given *element* isn't one of the
-    permutations of *iterable*.
-    """
-    index = 0
-    pool = list(iterable)
-    for i, x in zip(range(len(pool), -1, -1), element):
-        r = pool.index(x)
-        index = index * i + r
-        del pool[r]
-
-    return index
-
-
-class countable:
-    """Wrap *iterable* and keep a count of how many items have been consumed.
-
-    The ``items_seen`` attribute starts at ``0`` and increments as the iterable
-    is consumed:
-
-        >>> iterable = map(str, range(10))
-        >>> it = countable(iterable)
-        >>> it.items_seen
-        0
-        >>> next(it), next(it)
-        ('0', '1')
-        >>> list(it)
-        ['2', '3', '4', '5', '6', '7', '8', '9']
-        >>> it.items_seen
-        10
-    """
-
-    def __init__(self, iterable):
-        self._it = iter(iterable)
-        self.items_seen = 0
-
-    def __iter__(self):
-        return self
-
-    def __next__(self):
-        item = next(self._it)
-        self.items_seen += 1
-
-        return item
-
-
-def chunked_even(iterable, n):
-    """Break *iterable* into lists of approximately length *n*.
-    Items are distributed such that the lengths of the lists differ by at most
-    1 item.
-
-    >>> iterable = [1, 2, 3, 4, 5, 6, 7]
-    >>> n = 3
-    >>> list(chunked_even(iterable, n))  # List lengths: 3, 2, 2
-    [[1, 2, 3], [4, 5], [6, 7]]
-    >>> list(chunked(iterable, n))  # List lengths: 3, 3, 1
-    [[1, 2, 3], [4, 5, 6], [7]]
-
-    """
-
-    len_method = getattr(iterable, '__len__', None)
-
-    if len_method is None:
-        return _chunked_even_online(iterable, n)
-    else:
-        return _chunked_even_finite(iterable, len_method(), n)
-
-
-def _chunked_even_online(iterable, n):
-    buffer = []
-    maxbuf = n + (n - 2) * (n - 1)
-    for x in iterable:
-        buffer.append(x)
-        if len(buffer) == maxbuf:
-            yield buffer[:n]
-            buffer = buffer[n:]
-    yield from _chunked_even_finite(buffer, len(buffer), n)
-
-
-def _chunked_even_finite(iterable, N, n):
-    if N < 1:
-        return
-
-    # Lists are either size `full_size <= n` or `partial_size = full_size - 1`
-    q, r = divmod(N, n)
-    num_lists = q + (1 if r > 0 else 0)
-    q, r = divmod(N, num_lists)
-    full_size = q + (1 if r > 0 else 0)
-    partial_size = full_size - 1
-    num_full = N - partial_size * num_lists
-    num_partial = num_lists - num_full
-
-    buffer = []
-    iterator = iter(iterable)
-
-    # Yield num_full lists of full_size
-    for x in iterator:
-        buffer.append(x)
-        if len(buffer) == full_size:
-            yield buffer
-            buffer = []
-            num_full -= 1
-            if num_full <= 0:
-                break
-
-    # Yield num_partial lists of partial_size
-    for x in iterator:
-        buffer.append(x)
-        if len(buffer) == partial_size:
-            yield buffer
-            buffer = []
-            num_partial -= 1
-
-
-def zip_broadcast(*objects, scalar_types=(str, bytes), strict=False):
-    """A version of :func:`zip` that "broadcasts" any scalar
-    (i.e., non-iterable) items into output tuples.
-
-    >>> iterable_1 = [1, 2, 3]
-    >>> iterable_2 = ['a', 'b', 'c']
-    >>> scalar = '_'
-    >>> list(zip_broadcast(iterable_1, iterable_2, scalar))
-    [(1, 'a', '_'), (2, 'b', '_'), (3, 'c', '_')]
-
-    The *scalar_types* keyword argument determines what types are considered
-    scalar. It is set to ``(str, bytes)`` by default. Set it to ``None`` to
-    treat strings and byte strings as iterable:
-
-    >>> list(zip_broadcast('abc', 0, 'xyz', scalar_types=None))
-    [('a', 0, 'x'), ('b', 0, 'y'), ('c', 0, 'z')]
-
-    If the *strict* keyword argument is ``True``, then
-    ``UnequalIterablesError`` will be raised if any of the iterables have
-    different lengths.
-    """
-
-    def is_scalar(obj):
-        if scalar_types and isinstance(obj, scalar_types):
-            return True
-        try:
-            iter(obj)
-        except TypeError:
-            return True
-        else:
-            return False
-
-    size = len(objects)
-    if not size:
-        return
-
-    iterables, iterable_positions = [], []
-    scalars, scalar_positions = [], []
-    for i, obj in enumerate(objects):
-        if is_scalar(obj):
-            scalars.append(obj)
-            scalar_positions.append(i)
-        else:
-            iterables.append(iter(obj))
-            iterable_positions.append(i)
-
-    if len(scalars) == size:
-        yield tuple(objects)
-        return
-
-    zipper = _zip_equal if strict else zip
-    for item in zipper(*iterables):
-        new_item = [None] * size
-
-        for i, elem in zip(iterable_positions, item):
-            new_item[i] = elem
-
-        for i, elem in zip(scalar_positions, scalars):
-            new_item[i] = elem
-
-        yield tuple(new_item)
-
-
-def unique_in_window(iterable, n, key=None):
-    """Yield the items from *iterable* that haven't been seen recently.
-    *n* is the size of the lookback window.
-
-        >>> iterable = [0, 1, 0, 2, 3, 0]
-        >>> n = 3
-        >>> list(unique_in_window(iterable, n))
-        [0, 1, 2, 3, 0]
-
-    The *key* function, if provided, will be used to determine uniqueness:
-
-        >>> list(unique_in_window('abAcda', 3, key=lambda x: x.lower()))
-        ['a', 'b', 'c', 'd', 'a']
-
-    The items in *iterable* must be hashable.
-
-    """
-    if n <= 0:
-        raise ValueError('n must be greater than 0')
-
-    window = deque(maxlen=n)
-    uniques = set()
-    use_key = key is not None
-
-    for item in iterable:
-        k = key(item) if use_key else item
-        if k in uniques:
-            continue
-
-        if len(uniques) == n:
-            uniques.discard(window[0])
-
-        uniques.add(k)
-        window.append(k)
-
-        yield item
-
-
-def duplicates_everseen(iterable, key=None):
-    """Yield duplicate elements after their first appearance.
-
-    >>> list(duplicates_everseen('mississippi'))
-    ['s', 'i', 's', 's', 'i', 'p', 'i']
-    >>> list(duplicates_everseen('AaaBbbCccAaa', str.lower))
-    ['a', 'a', 'b', 'b', 'c', 'c', 'A', 'a', 'a']
-
-    This function is analogous to :func:`unique_everseen` and is subject to
-    the same performance considerations.
-
-    """
-    seen_set = set()
-    seen_list = []
-    use_key = key is not None
-
-    for element in iterable:
-        k = key(element) if use_key else element
-        try:
-            if k not in seen_set:
-                seen_set.add(k)
-            else:
-                yield element
-        except TypeError:
-            if k not in seen_list:
-                seen_list.append(k)
-            else:
-                yield element
-
-
-def duplicates_justseen(iterable, key=None):
-    """Yields serially-duplicate elements after their first appearance.
-
-    >>> list(duplicates_justseen('mississippi'))
-    ['s', 's', 'p']
-    >>> list(duplicates_justseen('AaaBbbCccAaa', str.lower))
-    ['a', 'a', 'b', 'b', 'c', 'c', 'a', 'a']
-
-    This function is analogous to :func:`unique_justseen`.
-
-    """
-    return flatten(
-        map(
-            lambda group_tuple: islice_extended(group_tuple[1])[1:],
-            groupby(iterable, key),
-        )
-    )
-
-
-def minmax(iterable_or_value, *others, key=None, default=_marker):
-    """Returns both the smallest and largest items in an iterable
-    or the largest of two or more arguments.
-
-        >>> minmax([3, 1, 5])
-        (1, 5)
-
-        >>> minmax(4, 2, 6)
-        (2, 6)
-
-    If a *key* function is provided, it will be used to transform the input
-    items for comparison.
-
-        >>> minmax([5, 30], key=str)  # '30' sorts before '5'
-        (30, 5)
-
-    If a *default* value is provided, it will be returned if there are no
-    input items.
-
-        >>> minmax([], default=(0, 0))
-        (0, 0)
-
-    Otherwise ``ValueError`` is raised.
-
-    This function is based on the
-    `recipe <http://code.activestate.com/recipes/577916/>`__ by
-    Raymond Hettinger and takes care to minimize the number of comparisons
-    performed.
-    """
-    iterable = (iterable_or_value, *others) if others else iterable_or_value
-
-    it = iter(iterable)
-
-    try:
-        lo = hi = next(it)
-    except StopIteration as e:
-        if default is _marker:
-            raise ValueError(
-                '`minmax()` argument is an empty iterable. '
-                'Provide a `default` value to suppress this error.'
-            ) from e
-        return default
-
-    # Different branches depending on the presence of key. This saves a lot
-    # of unimportant copies which would significantly slow down the
-    # "key=None" branch.
-    if key is None:
-        for x, y in zip_longest(it, it, fillvalue=lo):
-            if y < x:
-                x, y = y, x
-            if x < lo:
-                lo = x
-            if hi < y:
-                hi = y
-
-    else:
-        lo_key = hi_key = key(lo)
-
-        for x, y in zip_longest(it, it, fillvalue=lo):
-            x_key, y_key = key(x), key(y)
-
-            if y_key < x_key:
-                x, y, x_key, y_key = y, x, y_key, x_key
-            if x_key < lo_key:
-                lo, lo_key = x, x_key
-            if hi_key < y_key:
-                hi, hi_key = y, y_key
-
-    return lo, hi
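-
-
-# Hedged note on the pairing trick above (illustrative): comparing items two
-# at a time costs three comparisons per pair (x vs y, x vs lo, y vs hi)
-# instead of four, about 1.5 comparisons per item. `_check_minmax` is a
-# hypothetical helper.
-def _check_minmax():
-    data = [3, 1, 4, 1, 5, 9, 2, 6]
-    return minmax(data) == (min(data), max(data))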
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/more_itertools/recipes.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/more_itertools/recipes.py
deleted file mode 100644
index a259642..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/more_itertools/recipes.py
+++ /dev/null
@@ -1,698 +0,0 @@
-"""Imported from the recipes section of the itertools documentation.
-
-All functions taken from the recipes section of the itertools library docs
-[1]_.
-Some backward-compatible usability improvements have been made.
-
-.. [1] http://docs.python.org/library/itertools.html#recipes
-
-"""
-import warnings
-from collections import deque
-from itertools import (
-    chain,
-    combinations,
-    count,
-    cycle,
-    groupby,
-    islice,
-    repeat,
-    starmap,
-    tee,
-    zip_longest,
-)
-import operator
-from random import randrange, sample, choice
-
-__all__ = [
-    'all_equal',
-    'before_and_after',
-    'consume',
-    'convolve',
-    'dotproduct',
-    'first_true',
-    'flatten',
-    'grouper',
-    'iter_except',
-    'ncycles',
-    'nth',
-    'nth_combination',
-    'padnone',
-    'pad_none',
-    'pairwise',
-    'partition',
-    'powerset',
-    'prepend',
-    'quantify',
-    'random_combination_with_replacement',
-    'random_combination',
-    'random_permutation',
-    'random_product',
-    'repeatfunc',
-    'roundrobin',
-    'sliding_window',
-    'tabulate',
-    'tail',
-    'take',
-    'triplewise',
-    'unique_everseen',
-    'unique_justseen',
-]
-
-
-def take(n, iterable):
-    """Return first *n* items of the iterable as a list.
-
-        >>> take(3, range(10))
-        [0, 1, 2]
-
-    If there are fewer than *n* items in the iterable, all of them are
-    returned.
-
-        >>> take(10, range(3))
-        [0, 1, 2]
-
-    """
-    return list(islice(iterable, n))
-
-
-def tabulate(function, start=0):
-    """Return an iterator over the results of ``func(start)``,
-    ``func(start + 1)``, ``func(start + 2)``...
-
-    *func* should be a function that accepts one integer argument.
-
-    If *start* is not specified it defaults to 0. It will be incremented each
-    time the iterator is advanced.
-
-        >>> square = lambda x: x ** 2
-        >>> iterator = tabulate(square, -3)
-        >>> take(4, iterator)
-        [9, 4, 1, 0]
-
-    """
-    return map(function, count(start))
-
-
-def tail(n, iterable):
-    """Return an iterator over the last *n* items of *iterable*.
-
-    >>> t = tail(3, 'ABCDEFG')
-    >>> list(t)
-    ['E', 'F', 'G']
-
-    """
-    return iter(deque(iterable, maxlen=n))
-
-
-def consume(iterator, n=None):
-    """Advance *iterable* by *n* steps. If *n* is ``None``, consume it
-    entirely.
-
-    Efficiently exhausts an iterator without returning values. Defaults to
-    consuming the whole iterator, but an optional second argument may be
-    provided to limit consumption.
-
-        >>> i = (x for x in range(10))
-        >>> next(i)
-        0
-        >>> consume(i, 3)
-        >>> next(i)
-        4
-        >>> consume(i)
-        >>> next(i)
-        Traceback (most recent call last):
-          File "<stdin>", line 1, in <module>
-        StopIteration
-
-    If the iterator has fewer items remaining than the provided limit, the
-    whole iterator will be consumed.
-
-        >>> i = (x for x in range(3))
-        >>> consume(i, 5)
-        >>> next(i)
-        Traceback (most recent call last):
-          File "<stdin>", line 1, in <module>
-        StopIteration
-
-    """
-    # Use functions that consume iterators at C speed.
-    if n is None:
-        # feed the entire iterator into a zero-length deque
-        deque(iterator, maxlen=0)
-    else:
-        # advance to the empty slice starting at position n
-        next(islice(iterator, n, n), None)
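-
-
-# Hedged illustration (not part of the recipes): islice(iterator, n, n) is
-# an empty slice that still advances the underlying iterator n steps, which
-# is why the next() call above consumes without collecting. The helper below
-# is hypothetical.
-def _demo_consume_slice():
-    it = iter(range(10))
-    next(islice(it, 3, 3), None)  # advances past 0, 1, 2 without storing
-    return next(it)  # -> 3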
-
-
-def nth(iterable, n, default=None):
-    """Returns the nth item or a default value.
-
-    >>> l = range(10)
-    >>> nth(l, 3)
-    3
-    >>> nth(l, 20, "zebra")
-    'zebra'
-
-    """
-    return next(islice(iterable, n, None), default)
-
-
-def all_equal(iterable):
-    """
-    Returns ``True`` if all the elements are equal to each other.
-
-        >>> all_equal('aaaa')
-        True
-        >>> all_equal('aaab')
-        False
-
-    """
-    g = groupby(iterable)
-    return next(g, True) and not next(g, False)
-
-
-def quantify(iterable, pred=bool):
-    """Return the how many times the predicate is true.
-
-    >>> quantify([True, False, True])
-    2
-
-    """
-    return sum(map(pred, iterable))
-
-
-def pad_none(iterable):
-    """Returns the sequence of elements and then returns ``None`` indefinitely.
-
-        >>> take(5, pad_none(range(3)))
-        [0, 1, 2, None, None]
-
-    Useful for emulating the behavior of the built-in :func:`map` function.
-
-    See also :func:`padded`.
-
-    """
-    return chain(iterable, repeat(None))
-
-
-padnone = pad_none
-
-
-def ncycles(iterable, n):
-    """Returns the sequence elements *n* times
-
-    >>> list(ncycles(["a", "b"], 3))
-    ['a', 'b', 'a', 'b', 'a', 'b']
-
-    """
-    return chain.from_iterable(repeat(tuple(iterable), n))
-
-
-def dotproduct(vec1, vec2):
-    """Returns the dot product of the two iterables.
-
-    >>> dotproduct([10, 10], [20, 20])
-    400
-
-    """
-    return sum(map(operator.mul, vec1, vec2))
-
-
-def flatten(listOfLists):
-    """Return an iterator flattening one level of nesting in a list of lists.
-
-        >>> list(flatten([[0, 1], [2, 3]]))
-        [0, 1, 2, 3]
-
-    See also :func:`collapse`, which can flatten multiple levels of nesting.
-
-    """
-    return chain.from_iterable(listOfLists)
-
-
-def repeatfunc(func, times=None, *args):
-    """Call *func* with *args* repeatedly, returning an iterable over the
-    results.
-
-    If *times* is specified, the iterable will terminate after that many
-    repetitions:
-
-        >>> from operator import add
-        >>> times = 4
-        >>> args = 3, 5
-        >>> list(repeatfunc(add, times, *args))
-        [8, 8, 8, 8]
-
-    If *times* is ``None`` the iterable will not terminate:
-
-        >>> from random import randrange
-        >>> times = None
-        >>> args = 1, 11
-        >>> take(6, repeatfunc(randrange, times, *args))  # doctest:+SKIP
-        [2, 4, 8, 1, 8, 4]
-
-    """
-    if times is None:
-        return starmap(func, repeat(args))
-    return starmap(func, repeat(args, times))
-
-
-def _pairwise(iterable):
-    """Returns an iterator of paired items, overlapping, from the original
-
-    >>> take(4, pairwise(count()))
-    [(0, 1), (1, 2), (2, 3), (3, 4)]
-
-    On Python 3.10 and above, this is an alias for :func:`itertools.pairwise`.
-
-    """
-    a, b = tee(iterable)
-    next(b, None)
-    yield from zip(a, b)
-
-
-try:
-    from itertools import pairwise as itertools_pairwise
-except ImportError:
-    pairwise = _pairwise
-else:
-
-    def pairwise(iterable):
-        yield from itertools_pairwise(iterable)
-
-    pairwise.__doc__ = _pairwise.__doc__
-
-
-def grouper(iterable, n, fillvalue=None):
-    """Collect data into fixed-length chunks or blocks.
-
-    >>> list(grouper('ABCDEFG', 3, 'x'))
-    [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]
-
-    """
-    if isinstance(iterable, int):
-        warnings.warn(
-            "grouper expects iterable as first parameter", DeprecationWarning
-        )
-        n, iterable = iterable, n
-    args = [iter(iterable)] * n
-    return zip_longest(*args, fillvalue=fillvalue)
-
-
-def roundrobin(*iterables):
-    """Yields an item from each iterable, alternating between them.
-
-        >>> list(roundrobin('ABC', 'D', 'EF'))
-        ['A', 'D', 'E', 'B', 'F', 'C']
-
-    This function produces the same output as :func:`interleave_longest`, but
-    may perform better for some inputs (in particular when the number of
-    iterables is small).
-
-    """
-    # Recipe credited to George Sakkis
-    pending = len(iterables)
-    nexts = cycle(iter(it).__next__ for it in iterables)
-    while pending:
-        try:
-            for next in nexts:
-                yield next()
-        except StopIteration:
-            pending -= 1
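-            # cycle() has just yielded the exhausted iterator's __next__, so
-            # islice(nexts, pending) pulls the remaining `pending` bound
-            # methods and rebuilds the cycle without the dead one.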
-            nexts = cycle(islice(nexts, pending))
-
-
-def partition(pred, iterable):
-    """
-    Returns a 2-tuple of iterables derived from the input iterable.
-    The first yields the items that have ``pred(item) == False``.
-    The second yields the items that have ``pred(item) == True``.
-
-        >>> is_odd = lambda x: x % 2 != 0
-        >>> iterable = range(10)
-        >>> even_items, odd_items = partition(is_odd, iterable)
-        >>> list(even_items), list(odd_items)
-        ([0, 2, 4, 6, 8], [1, 3, 5, 7, 9])
-
-    If *pred* is None, :func:`bool` is used.
-
-        >>> iterable = [0, 1, False, True, '', ' ']
-        >>> false_items, true_items = partition(None, iterable)
-        >>> list(false_items), list(true_items)
-        ([0, False, ''], [1, True, ' '])
-
-    """
-    if pred is None:
-        pred = bool
-
-    evaluations = ((pred(x), x) for x in iterable)
-    t1, t2 = tee(evaluations)
-    return (
-        (x for (cond, x) in t1 if not cond),
-        (x for (cond, x) in t2 if cond),
-    )
-
-
-def powerset(iterable):
-    """Yields all possible subsets of the iterable.
-
-        >>> list(powerset([1, 2, 3]))
-        [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
-
-    :func:`powerset` will operate on iterables that aren't :class:`set`
-    instances, so repeated elements in the input will produce repeated elements
-    in the output. Use :func:`unique_everseen` on the input to avoid generating
-    duplicates:
-
-        >>> seq = [1, 1, 0]
-        >>> list(powerset(seq))
-        [(), (1,), (1,), (0,), (1, 1), (1, 0), (1, 0), (1, 1, 0)]
-        >>> from more_itertools import unique_everseen
-        >>> list(powerset(unique_everseen(seq)))
-        [(), (1,), (0,), (1, 0)]
-
-    """
-    s = list(iterable)
-    return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))
-
-
-def unique_everseen(iterable, key=None):
-    """
-    Yield unique elements, preserving order.
-
-        >>> list(unique_everseen('AAAABBBCCDAABBB'))
-        ['A', 'B', 'C', 'D']
-        >>> list(unique_everseen('ABBCcAD', str.lower))
-        ['A', 'B', 'C', 'D']
-
-    Sequences with a mix of hashable and unhashable items can be used.
-    The function will be slower (i.e., `O(n^2)`) for unhashable items.
-
-    Remember that ``list`` objects are unhashable - you can use the *key*
-    parameter to transform the list to a tuple (which is hashable) to
-    avoid a slowdown.
-
-        >>> iterable = ([1, 2], [2, 3], [1, 2])
-        >>> list(unique_everseen(iterable))  # Slow
-        [[1, 2], [2, 3]]
-        >>> list(unique_everseen(iterable, key=tuple))  # Faster
-        [[1, 2], [2, 3]]
-
-    Similarly, you may want to convert unhashable ``set`` objects with
-    ``key=frozenset``. For ``dict`` objects,
-    ``key=lambda x: frozenset(x.items())`` can be used.
-
-    """
-    seenset = set()
-    seenset_add = seenset.add
-    seenlist = []
-    seenlist_add = seenlist.append
-    use_key = key is not None
-
-    for element in iterable:
-        k = key(element) if use_key else element
-        try:
-            if k not in seenset:
-                seenset_add(k)
-                yield element
-        except TypeError:
-            if k not in seenlist:
-                seenlist_add(k)
-                yield element
-
-
-def unique_justseen(iterable, key=None):
-    """Yields elements in order, ignoring serial duplicates
-
-    >>> list(unique_justseen('AAAABBBCCDAABBB'))
-    ['A', 'B', 'C', 'D', 'A', 'B']
-    >>> list(unique_justseen('ABBCcAD', str.lower))
-    ['A', 'B', 'C', 'A', 'D']
-
-    """
-    return map(next, map(operator.itemgetter(1), groupby(iterable, key)))
-
-
-def iter_except(func, exception, first=None):
-    """Yields results from a function repeatedly until an exception is raised.
-
-    Converts a call-until-exception interface to an iterator interface.
-    Like ``iter(func, sentinel)``, but uses an exception instead of a sentinel
-    to end the loop.
-
-        >>> l = [0, 1, 2]
-        >>> list(iter_except(l.pop, IndexError))
-        [2, 1, 0]
-
-    Multiple exceptions can be specified as a stopping condition:
-
-        >>> l = [1, 2, 3, '...', 4, 5, 6]
-        >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
-        [7, 6, 5]
-        >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
-        [4, 3, 2]
-        >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
-        []
-
-    """
-    try:
-        if first is not None:
-            yield first()
-        while 1:
-            yield func()
-    except exception:
-        pass
-
-
-def first_true(iterable, default=None, pred=None):
-    """
-    Returns the first true value in the iterable.
-
-    If no true value is found, returns *default*
-
-    If *pred* is not None, returns the first item for which
-    ``pred(item) == True``.
-
-        >>> first_true(range(10))
-        1
-        >>> first_true(range(10), pred=lambda x: x > 5)
-        6
-        >>> first_true(range(10), default='missing', pred=lambda x: x > 9)
-        'missing'
-
-    """
-    return next(filter(pred, iterable), default)
-
-
-def random_product(*args, repeat=1):
-    """Draw an item at random from each of the input iterables.
-
-        >>> random_product('abc', range(4), 'XYZ')  # doctest:+SKIP
-        ('c', 3, 'Z')
-
-    If *repeat* is provided as a keyword argument, that many items will be
-    drawn from each iterable.
-
-        >>> random_product('abcd', range(4), repeat=2)  # doctest:+SKIP
-        ('a', 2, 'd', 3)
-
-    This is equivalent to taking a random selection from
-    ``itertools.product(*args, repeat=repeat)``.
-
-    """
-    pools = [tuple(pool) for pool in args] * repeat
-    return tuple(choice(pool) for pool in pools)
-
-
-def random_permutation(iterable, r=None):
-    """Return a random *r* length permutation of the elements in *iterable*.
-
-    If *r* is not specified or is ``None``, then *r* defaults to the length of
-    *iterable*.
-
-        >>> random_permutation(range(5))  # doctest:+SKIP
-        (3, 4, 0, 1, 2)
-
-    This is equivalent to taking a random selection from
-    ``itertools.permutations(iterable, r)``.
-
-    """
-    pool = tuple(iterable)
-    r = len(pool) if r is None else r
-    return tuple(sample(pool, r))
-
-
-def random_combination(iterable, r):
-    """Return a random *r* length subsequence of the elements in *iterable*.
-
-        >>> random_combination(range(5), 3)  # doctest:+SKIP
-        (2, 3, 4)
-
-    This is equivalent to taking a random selection from
-    ``itertools.combinations(iterable, r)``.
-
-    """
-    pool = tuple(iterable)
-    n = len(pool)
-    indices = sorted(sample(range(n), r))
-    return tuple(pool[i] for i in indices)
-
-
-def random_combination_with_replacement(iterable, r):
-    """Return a random *r* length subsequence of elements in *iterable*,
-    allowing individual elements to be repeated.
-
-        >>> random_combination_with_replacement(range(3), 5) # doctest:+SKIP
-        (0, 0, 1, 2, 2)
-
-    This is equivalent to taking a random selection from
-    ``itertools.combinations_with_replacement(iterable, r)``.
-
-    """
-    pool = tuple(iterable)
-    n = len(pool)
-    indices = sorted(randrange(n) for i in range(r))
-    return tuple(pool[i] for i in indices)
-
-
-def nth_combination(iterable, r, index):
-    """Equivalent to ``list(combinations(iterable, r))[index]``.
-
-    The subsequences of *iterable* that are of length *r* can be ordered
-    lexicographically. :func:`nth_combination` computes the subsequence at
-    sort position *index* directly, without computing the previous
-    subsequences.
-
-        >>> nth_combination(range(5), 3, 5)
-        (0, 3, 4)
-
-    ``ValueError`` will be raised if *r* is negative or greater than the length
-    of *iterable*.
-    ``IndexError`` will be raised if the given *index* is invalid.
-    """
-    pool = tuple(iterable)
-    n = len(pool)
-    if (r < 0) or (r > n):
-        raise ValueError
-
-    c = 1
-    k = min(r, n - r)
-    for i in range(1, k + 1):
-        c = c * (n - k + i) // i
-
-    if index < 0:
-        index += c
-
-    if (index < 0) or (index >= c):
-        raise IndexError
-
-    result = []
-    while r:
-        c, n, r = c * r // n, n - 1, r - 1
-        while index >= c:
-            index -= c
-            c, n = c * (n - r) // n, n - 1
-        result.append(pool[-1 - n])
-
-    return tuple(result)
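-
-
-# Hedged sanity check (illustrative only): the combinadic loop above should
-# agree with materializing the combinations. `_check_nth_combination` is a
-# hypothetical helper.
-def _check_nth_combination():
-    full = list(combinations(range(5), 3))
-    return all(
-        nth_combination(range(5), 3, i) == full[i] for i in range(len(full))
-    )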
-
-
-def prepend(value, iterator):
-    """Yield *value*, followed by the elements in *iterator*.
-
-        >>> value = '0'
-        >>> iterator = ['1', '2', '3']
-        >>> list(prepend(value, iterator))
-        ['0', '1', '2', '3']
-
-    To prepend multiple values, see :func:`itertools.chain`
-    or :func:`value_chain`.
-
-    """
-    return chain([value], iterator)
-
-
-def convolve(signal, kernel):
-    """Convolve the iterable *signal* with the iterable *kernel*.
-
-        >>> signal = (1, 2, 3, 4, 5)
-        >>> kernel = [3, 2, 1]
-        >>> list(convolve(signal, kernel))
-        [3, 8, 14, 20, 26, 14, 5]
-
-    Note: the input arguments are not interchangeable, as the *kernel*
-    is immediately consumed and stored.
-
-    """
-    kernel = tuple(kernel)[::-1]
-    n = len(kernel)
-    window = deque([0], maxlen=n) * n
-    for x in chain(signal, repeat(0, n - 1)):
-        window.append(x)
-        yield sum(map(operator.mul, kernel, window))
-
-
-def before_and_after(predicate, it):
-    """A variant of :func:`takewhile` that allows complete access to the
-    remainder of the iterator.
-
-         >>> it = iter('ABCdEfGhI')
-         >>> all_upper, remainder = before_and_after(str.isupper, it)
-         >>> ''.join(all_upper)
-         'ABC'
-         >>> ''.join(remainder) # takewhile() would lose the 'd'
-         'dEfGhI'
-
-    Note that the first iterator must be fully consumed before the second
-    iterator can generate valid results.
-    """
-    it = iter(it)
-    transition = []
-
-    def true_iterator():
-        for elem in it:
-            if predicate(elem):
-                yield elem
-            else:
-                transition.append(elem)
-                return
-
-    def remainder_iterator():
-        yield from transition
-        yield from it
-
-    return true_iterator(), remainder_iterator()
-
-
-def triplewise(iterable):
-    """Return overlapping triplets from *iterable*.
-
-    >>> list(triplewise('ABCDE'))
-    [('A', 'B', 'C'), ('B', 'C', 'D'), ('C', 'D', 'E')]
-
-    """
-    for (a, _), (b, c) in pairwise(pairwise(iterable)):
-        yield a, b, c
-
-
-def sliding_window(iterable, n):
-    """Return a sliding window of width *n* over *iterable*.
-
-        >>> list(sliding_window(range(6), 4))
-        [(0, 1, 2, 3), (1, 2, 3, 4), (2, 3, 4, 5)]
-
-    If *iterable* has fewer than *n* items, then nothing is yielded:
-
-        >>> list(sliding_window(range(3), 4))
-        []
-
-    For a variant with more features, see :func:`windowed`.
-    """
-    it = iter(iterable)
-    window = deque(islice(it, n), maxlen=n)
-    if len(window) == n:
-        yield tuple(window)
-    for x in it:
-        window.append(x)
-        yield tuple(window)
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__about__.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__about__.py
deleted file mode 100644
index 3551bc2..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__about__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-__all__ = [
-    "__title__",
-    "__summary__",
-    "__uri__",
-    "__version__",
-    "__author__",
-    "__email__",
-    "__license__",
-    "__copyright__",
-]
-
-__title__ = "packaging"
-__summary__ = "Core utilities for Python packages"
-__uri__ = "https://github.com/pypa/packaging"
-
-__version__ = "21.3"
-
-__author__ = "Donald Stufft and individual contributors"
-__email__ = "donald@stufft.io"
-
-__license__ = "BSD-2-Clause or Apache-2.0"
-__copyright__ = "2014-2019 %s" % __author__
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__init__.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__init__.py
deleted file mode 100644
index 3c50c5d..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__init__.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-from .__about__ import (
-    __author__,
-    __copyright__,
-    __email__,
-    __license__,
-    __summary__,
-    __title__,
-    __uri__,
-    __version__,
-)
-
-__all__ = [
-    "__title__",
-    "__summary__",
-    "__uri__",
-    "__version__",
-    "__author__",
-    "__email__",
-    "__license__",
-    "__copyright__",
-]
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/__about__.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/__about__.cpython-310.pyc
deleted file mode 100644
index 88c1b4f..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/__about__.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/__init__.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/__init__.cpython-310.pyc
deleted file mode 100644
index e73bbb4..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/__init__.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/_manylinux.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/_manylinux.cpython-310.pyc
deleted file mode 100644
index 450e28b..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/_manylinux.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/_musllinux.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/_musllinux.cpython-310.pyc
deleted file mode 100644
index aff46d0..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/_musllinux.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/_structures.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/_structures.cpython-310.pyc
deleted file mode 100644
index 47064a3..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/_structures.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/markers.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/markers.cpython-310.pyc
deleted file mode 100644
index 2be0d28..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/markers.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/requirements.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/requirements.cpython-310.pyc
deleted file mode 100644
index ce98dd4..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/requirements.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/specifiers.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/specifiers.cpython-310.pyc
deleted file mode 100644
index dc59cac..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/specifiers.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/tags.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/tags.cpython-310.pyc
deleted file mode 100644
index eb8ae2b..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/tags.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/utils.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/utils.cpython-310.pyc
deleted file mode 100644
index a2cf382..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/utils.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/version.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/version.cpython-310.pyc
deleted file mode 100644
index 4988e79..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/__pycache__/version.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_manylinux.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_manylinux.py
deleted file mode 100644
index 4c379aa..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_manylinux.py
+++ /dev/null
@@ -1,301 +0,0 @@
-import collections
-import functools
-import os
-import re
-import struct
-import sys
-import warnings
-from typing import IO, Dict, Iterator, NamedTuple, Optional, Tuple
-
-
-# Python does not provide platform information at sufficient granularity to
-# identify the architecture of the running executable in some cases, so we
-# determine it dynamically by reading the information from the running
-# process. This only applies on Linux, which uses the ELF format.
-class _ELFFileHeader:
-    # https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header
-    class _InvalidELFFileHeader(ValueError):
-        """
-        An invalid ELF file header was found.
-        """
-
-    ELF_MAGIC_NUMBER = 0x7F454C46
-    ELFCLASS32 = 1
-    ELFCLASS64 = 2
-    ELFDATA2LSB = 1
-    ELFDATA2MSB = 2
-    EM_386 = 3
-    EM_S390 = 22
-    EM_ARM = 40
-    EM_X86_64 = 62
-    EF_ARM_ABIMASK = 0xFF000000
-    EF_ARM_ABI_VER5 = 0x05000000
-    EF_ARM_ABI_FLOAT_HARD = 0x00000400
-
-    def __init__(self, file: IO[bytes]) -> None:
-        def unpack(fmt: str) -> int:
-            try:
-                data = file.read(struct.calcsize(fmt))
-                result: Tuple[int, ...] = struct.unpack(fmt, data)
-            except struct.error:
-                raise _ELFFileHeader._InvalidELFFileHeader()
-            return result[0]
-
-        self.e_ident_magic = unpack(">I")
-        if self.e_ident_magic != self.ELF_MAGIC_NUMBER:
-            raise _ELFFileHeader._InvalidELFFileHeader()
-        self.e_ident_class = unpack("B")
-        if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}:
-            raise _ELFFileHeader._InvalidELFFileHeader()
-        self.e_ident_data = unpack("B")
-        if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}:
-            raise _ELFFileHeader._InvalidELFFileHeader()
-        self.e_ident_version = unpack("B")
-        self.e_ident_osabi = unpack("B")
-        self.e_ident_abiversion = unpack("B")
-        self.e_ident_pad = file.read(7)
-        format_h = "<H" if self.e_ident_data == self.ELFDATA2LSB else ">H"
-        format_i = "<I" if self.e_ident_data == self.ELFDATA2LSB else ">I"
-        format_q = "<Q" if self.e_ident_data == self.ELFDATA2LSB else ">Q"
-        format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q
-        self.e_type = unpack(format_h)
-        self.e_machine = unpack(format_h)
-        self.e_version = unpack(format_i)
-        self.e_entry = unpack(format_p)
-        self.e_phoff = unpack(format_p)
-        self.e_shoff = unpack(format_p)
-        self.e_flags = unpack(format_i)
-        self.e_ehsize = unpack(format_h)
-        self.e_phentsize = unpack(format_h)
-        self.e_phnum = unpack(format_h)
-        self.e_shentsize = unpack(format_h)
-        self.e_shnum = unpack(format_h)
-        self.e_shstrndx = unpack(format_h)
-
-
-def _get_elf_header() -> Optional[_ELFFileHeader]:
-    try:
-        with open(sys.executable, "rb") as f:
-            elf_header = _ELFFileHeader(f)
-    except (OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader):
-        return None
-    return elf_header
-
-
-def _is_linux_armhf() -> bool:
-    # hard-float ABI can be detected from the ELF header of the running
-    # process
-    # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf
-    elf_header = _get_elf_header()
-    if elf_header is None:
-        return False
-    result = elf_header.e_ident_class == elf_header.ELFCLASS32
-    result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
-    result &= elf_header.e_machine == elf_header.EM_ARM
-    result &= (
-        elf_header.e_flags & elf_header.EF_ARM_ABIMASK
-    ) == elf_header.EF_ARM_ABI_VER5
-    result &= (
-        elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD
-    ) == elf_header.EF_ARM_ABI_FLOAT_HARD
-    return result
-
-
-def _is_linux_i686() -> bool:
-    elf_header = _get_elf_header()
-    if elf_header is None:
-        return False
-    result = elf_header.e_ident_class == elf_header.ELFCLASS32
-    result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
-    result &= elf_header.e_machine == elf_header.EM_386
-    return result
-
-
-def _have_compatible_abi(arch: str) -> bool:
-    if arch == "armv7l":
-        return _is_linux_armhf()
-    if arch == "i686":
-        return _is_linux_i686()
-    return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"}
-
-
-# If glibc ever changes its major version, we need to know what the last
-# minor version was, so we can build the complete list of all versions.
-# For now, guess what the highest minor version might be, assume it will
-# be 50 for testing. Once this actually happens, update the dictionary
-# with the actual value.
-_LAST_GLIBC_MINOR: Dict[int, int] = collections.defaultdict(lambda: 50)
-
-
-class _GLibCVersion(NamedTuple):
-    major: int
-    minor: int
-
-
-def _glibc_version_string_confstr() -> Optional[str]:
-    """
-    Primary implementation of glibc_version_string using os.confstr.
-    """
-    # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely
-    # to be broken or missing. This strategy is used in the standard library
-    # platform module.
-    # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183
-    try:
-        # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17".
-        version_string = os.confstr("CS_GNU_LIBC_VERSION")
-        assert version_string is not None
-        _, version = version_string.split()
-    except (AssertionError, AttributeError, OSError, ValueError):
-        # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
-        return None
-    return version
-
-
-def _glibc_version_string_ctypes() -> Optional[str]:
-    """
-    Fallback implementation of glibc_version_string using ctypes.
-    """
-    try:
-        import ctypes
-    except ImportError:
-        return None
-
-    # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
-    # manpage says, "If filename is NULL, then the returned handle is for the
-    # main program". This way we can let the linker do the work to figure out
-    # which libc our process is actually using.
-    #
-    # We must also handle the special case where the executable is not a
-    # dynamically linked executable. This can occur when using musl libc,
-    # for example. In this situation, dlopen() will error, leading to an
-    # OSError. Interestingly, at least in the case of musl, there is no
-    # errno set on the OSError. The single string argument used to construct
-    # OSError comes from libc itself and is therefore not portable to
-    # hard code here. In any case, failure to call dlopen() means we
-    # can't proceed, so we bail on our attempt.
-    try:
-        process_namespace = ctypes.CDLL(None)
-    except OSError:
-        return None
-
-    try:
-        gnu_get_libc_version = process_namespace.gnu_get_libc_version
-    except AttributeError:
-        # Symbol doesn't exist -> therefore, we are not linked to
-        # glibc.
-        return None
-
-    # Call gnu_get_libc_version, which returns a string like "2.5"
-    gnu_get_libc_version.restype = ctypes.c_char_p
-    version_str: str = gnu_get_libc_version()
-    # With a c_char_p restype the call returns bytes on Python 3; decode:
-    if not isinstance(version_str, str):
-        version_str = version_str.decode("ascii")
-
-    return version_str
-
-
-def _glibc_version_string() -> Optional[str]:
-    """Returns glibc version string, or None if not using glibc."""
-    return _glibc_version_string_confstr() or _glibc_version_string_ctypes()
-
-
-def _parse_glibc_version(version_str: str) -> Tuple[int, int]:
-    """Parse glibc version.
-
-    We use a regexp instead of str.split because we want to discard any
-    random junk that might come after the minor version -- this might happen
-    in patched/forked versions of glibc (e.g. Linaro's version of glibc
-    uses version strings like "2.20-2014.11"). See gh-3588.
-    """
-    m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
-    if not m:
-        warnings.warn(
-            "Expected glibc version with 2 components major.minor,"
-            " got: %s" % version_str,
-            RuntimeWarning,
-        )
-        return -1, -1
-    return int(m.group("major")), int(m.group("minor"))
-
-
-@functools.lru_cache()
-def _get_glibc_version() -> Tuple[int, int]:
-    version_str = _glibc_version_string()
-    if version_str is None:
-        return (-1, -1)
-    return _parse_glibc_version(version_str)
-
-
-# From PEP 513, PEP 600
-def _is_compatible(name: str, arch: str, version: _GLibCVersion) -> bool:
-    sys_glibc = _get_glibc_version()
-    if sys_glibc < version:
-        return False
-    # Check for presence of _manylinux module.
-    try:
-        import _manylinux  # noqa
-    except ImportError:
-        return True
-    if hasattr(_manylinux, "manylinux_compatible"):
-        result = _manylinux.manylinux_compatible(version[0], version[1], arch)
-        if result is not None:
-            return bool(result)
-        return True
-    if version == _GLibCVersion(2, 5):
-        if hasattr(_manylinux, "manylinux1_compatible"):
-            return bool(_manylinux.manylinux1_compatible)
-    if version == _GLibCVersion(2, 12):
-        if hasattr(_manylinux, "manylinux2010_compatible"):
-            return bool(_manylinux.manylinux2010_compatible)
-    if version == _GLibCVersion(2, 17):
-        if hasattr(_manylinux, "manylinux2014_compatible"):
-            return bool(_manylinux.manylinux2014_compatible)
-    return True
-
-
-_LEGACY_MANYLINUX_MAP = {
-    # CentOS 7 w/ glibc 2.17 (PEP 599)
-    (2, 17): "manylinux2014",
-    # CentOS 6 w/ glibc 2.12 (PEP 571)
-    (2, 12): "manylinux2010",
-    # CentOS 5 w/ glibc 2.5 (PEP 513)
-    (2, 5): "manylinux1",
-}
-
-
-def platform_tags(linux: str, arch: str) -> Iterator[str]:
-    if not _have_compatible_abi(arch):
-        return
-    # The oldest glibc supported on any architecture is (2, 17); we store
-    # (2, 16) because the minor-version ranges below are exclusive.
-    too_old_glibc2 = _GLibCVersion(2, 16)
-    if arch in {"x86_64", "i686"}:
-        # On x86_64/i686 the oldest supported glibc is (2, 5), stored as (2, 4).
-        too_old_glibc2 = _GLibCVersion(2, 4)
-    current_glibc = _GLibCVersion(*_get_glibc_version())
-    glibc_max_list = [current_glibc]
-    # We can assume compatibility across glibc major versions.
-    # https://sourceware.org/bugzilla/show_bug.cgi?id=24636
-    #
-    # Build a list of maximum glibc versions so that we can
-    # output the canonical list of all glibc from current_glibc
-    # down to too_old_glibc2, including all intermediary versions.
-    for glibc_major in range(current_glibc.major - 1, 1, -1):
-        glibc_minor = _LAST_GLIBC_MINOR[glibc_major]
-        glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor))
-    for glibc_max in glibc_max_list:
-        if glibc_max.major == too_old_glibc2.major:
-            min_minor = too_old_glibc2.minor
-        else:
-            # For other glibc major versions the oldest supported is (x, 0),
-            # so the exclusive lower bound is -1.
-            min_minor = -1
-        for glibc_minor in range(glibc_max.minor, min_minor, -1):
-            glibc_version = _GLibCVersion(glibc_max.major, glibc_minor)
-            tag = "manylinux_{}_{}".format(*glibc_version)
-            if _is_compatible(tag, arch, glibc_version):
-                yield linux.replace("linux", tag)
-            # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags.
-            if glibc_version in _LEGACY_MANYLINUX_MAP:
-                legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version]
-                if _is_compatible(legacy_tag, arch, glibc_version):
-                    yield linux.replace("linux", legacy_tag)
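
For reference, a minimal usage sketch for the deleted _manylinux helpers
above (a sketch only; it assumes the vendored copy is importable under the
pkg_resources._vendor path shown in the diff headers):

    from pkg_resources._vendor.packaging import _manylinux

    # On an x86_64 glibc system this yields PEP 600 tags from the current
    # glibc down to manylinux_2_17, interleaved with the legacy aliases
    # (manylinux2014, manylinux2010, manylinux1) where they apply.
    for tag in _manylinux.platform_tags("linux_x86_64", "x86_64"):
        print(tag)
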
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_musllinux.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_musllinux.py
deleted file mode 100644
index 8ac3059..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_musllinux.py
+++ /dev/null
@@ -1,136 +0,0 @@
-"""PEP 656 support.
-
-This module implements logic to detect if the currently running Python is
-linked against musl, and what musl version is used.
-"""
-
-import contextlib
-import functools
-import operator
-import os
-import re
-import struct
-import subprocess
-import sys
-from typing import IO, Iterator, NamedTuple, Optional, Tuple
-
-
-def _read_unpacked(f: IO[bytes], fmt: str) -> Tuple[int, ...]:
-    return struct.unpack(fmt, f.read(struct.calcsize(fmt)))
-
-
-def _parse_ld_musl_from_elf(f: IO[bytes]) -> Optional[str]:
-    """Detect musl libc location by parsing the Python executable.
-
-    Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
-    ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
-    """
-    f.seek(0)
-    try:
-        ident = _read_unpacked(f, "16B")
-    except struct.error:
-        return None
-    if ident[:4] != tuple(b"\x7fELF"):  # Invalid magic, not ELF.
-        return None
-    f.seek(struct.calcsize("HHI"), 1)  # Skip file type, machine, and version.
-
-    try:
-        # e_fmt: Format for the remainder of the ELF header.
-        # p_fmt: Format for a program header entry.
-        # p_idx: Indexes to find p_type, p_offset, and p_filesz.
-        e_fmt, p_fmt, p_idx = {
-            1: ("IIIIHHH", "IIIIIIII", (0, 1, 4)),  # 32-bit.
-            2: ("QQQIHHH", "IIQQQQQQ", (0, 2, 5)),  # 64-bit.
-        }[ident[4]]
-    except KeyError:
-        return None
-    else:
-        p_get = operator.itemgetter(*p_idx)
-
-    # Find the PT_INTERP segment and return its content.
-    try:
-        _, e_phoff, _, _, _, e_phentsize, e_phnum = _read_unpacked(f, e_fmt)
-    except struct.error:
-        return None
-    for i in range(e_phnum + 1):
-        f.seek(e_phoff + e_phentsize * i)
-        try:
-            p_type, p_offset, p_filesz = p_get(_read_unpacked(f, p_fmt))
-        except struct.error:
-            return None
-        if p_type != 3:  # Not PT_INTERP.
-            continue
-        f.seek(p_offset)
-        interpreter = os.fsdecode(f.read(p_filesz)).strip("\0")
-        if "musl" not in interpreter:
-            return None
-        return interpreter
-    return None
-
-
-class _MuslVersion(NamedTuple):
-    major: int
-    minor: int
-
-
-def _parse_musl_version(output: str) -> Optional[_MuslVersion]:
-    lines = [n for n in (n.strip() for n in output.splitlines()) if n]
-    if len(lines) < 2 or lines[0][:4] != "musl":
-        return None
-    m = re.match(r"Version (\d+)\.(\d+)", lines[1])
-    if not m:
-        return None
-    return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2)))
-
-
-@functools.lru_cache()
-def _get_musl_version(executable: str) -> Optional[_MuslVersion]:
-    """Detect currently-running musl runtime version.
-
-    This is done by checking the specified executable's dynamic linking
-    information, and invoking the loader to parse its output for a version
-    string. If the loader is musl, the output would be something like::
-
-        musl libc (x86_64)
-        Version 1.2.2
-        Dynamic Program Loader
-    """
-    with contextlib.ExitStack() as stack:
-        try:
-            f = stack.enter_context(open(executable, "rb"))
-        except OSError:
-            return None
-        ld = _parse_ld_musl_from_elf(f)
-    if not ld:
-        return None
-    proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True)
-    return _parse_musl_version(proc.stderr)
-
-
-def platform_tags(arch: str) -> Iterator[str]:
-    """Generate musllinux tags compatible to the current platform.
-
-    :param arch: Should be the part of platform tag after the ``linux_``
-        prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a
-        prerequisite for the current platform to be musllinux-compatible.
-
-    :returns: An iterator of compatible musllinux tags.
-    """
-    sys_musl = _get_musl_version(sys.executable)
-    if sys_musl is None:  # Python not dynamically linked against musl.
-        return
-    for minor in range(sys_musl.minor, -1, -1):
-        yield f"musllinux_{sys_musl.major}_{minor}_{arch}"
-
-
-if __name__ == "__main__":  # pragma: no cover
-    import sysconfig
-
-    plat = sysconfig.get_platform()
-    assert plat.startswith("linux-"), "not linux"
-
-    print("plat:", plat)
-    print("musl:", _get_musl_version(sys.executable))
-    print("tags:", end=" ")
-    for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])):
-        print(t, end="\n      ")
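
A small check of what _parse_musl_version accepts, using the loader output
format quoted in _get_musl_version's docstring (a sketch; the import path is
assumed from the vendored location above):

    from pkg_resources._vendor.packaging._musllinux import (
        _MuslVersion,
        _parse_musl_version,
    )

    output = "musl libc (x86_64)\nVersion 1.2.2\nDynamic Program Loader"
    assert _parse_musl_version(output) == _MuslVersion(major=1, minor=2)
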
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_structures.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_structures.py
deleted file mode 100644
index 90a6465..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/_structures.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-
-class InfinityType:
-    def __repr__(self) -> str:
-        return "Infinity"
-
-    def __hash__(self) -> int:
-        return hash(repr(self))
-
-    def __lt__(self, other: object) -> bool:
-        return False
-
-    def __le__(self, other: object) -> bool:
-        return False
-
-    def __eq__(self, other: object) -> bool:
-        return isinstance(other, self.__class__)
-
-    def __gt__(self, other: object) -> bool:
-        return True
-
-    def __ge__(self, other: object) -> bool:
-        return True
-
-    def __neg__(self: object) -> "NegativeInfinityType":
-        return NegativeInfinity
-
-
-Infinity = InfinityType()
-
-
-class NegativeInfinityType:
-    def __repr__(self) -> str:
-        return "-Infinity"
-
-    def __hash__(self) -> int:
-        return hash(repr(self))
-
-    def __lt__(self, other: object) -> bool:
-        return True
-
-    def __le__(self, other: object) -> bool:
-        return True
-
-    def __eq__(self, other: object) -> bool:
-        return isinstance(other, self.__class__)
-
-    def __gt__(self, other: object) -> bool:
-        return False
-
-    def __ge__(self, other: object) -> bool:
-        return False
-
-    def __neg__(self: object) -> InfinityType:
-        return Infinity
-
-
-NegativeInfinity = NegativeInfinityType()
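
These two singletons act as sentinels that compare above and below every
other value, which lets version-comparison keys pad tuples of unequal
length. A minimal sketch of their semantics (import path assumed):

    from pkg_resources._vendor.packaging._structures import (
        Infinity,
        NegativeInfinity,
    )

    assert Infinity > (99, 99) and NegativeInfinity < (0,)
    assert -Infinity is NegativeInfinity and -NegativeInfinity is Infinity
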
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/markers.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/markers.py
deleted file mode 100644
index 18769b0..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/markers.py
+++ /dev/null
@@ -1,304 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-import operator
-import os
-import platform
-import sys
-from typing import Any, Callable, Dict, List, Optional, Tuple, Union
-
-from pkg_resources.extern.pyparsing import (  # noqa: N817
-    Forward,
-    Group,
-    Literal as L,
-    ParseException,
-    ParseResults,
-    QuotedString,
-    ZeroOrMore,
-    stringEnd,
-    stringStart,
-)
-
-from .specifiers import InvalidSpecifier, Specifier
-
-__all__ = [
-    "InvalidMarker",
-    "UndefinedComparison",
-    "UndefinedEnvironmentName",
-    "Marker",
-    "default_environment",
-]
-
-Operator = Callable[[str, str], bool]
-
-
-class InvalidMarker(ValueError):
-    """
-    An invalid marker was found; users should refer to PEP 508.
-    """
-
-
-class UndefinedComparison(ValueError):
-    """
-    An invalid operation was attempted on a value that doesn't support it.
-    """
-
-
-class UndefinedEnvironmentName(ValueError):
-    """
-    A name was used that does not exist in the evaluation
-    environment.
-    """
-
-
-class Node:
-    def __init__(self, value: Any) -> None:
-        self.value = value
-
-    def __str__(self) -> str:
-        return str(self.value)
-
-    def __repr__(self) -> str:
-        return f"<{self.__class__.__name__}('{self}')>"
-
-    def serialize(self) -> str:
-        raise NotImplementedError
-
-
-class Variable(Node):
-    def serialize(self) -> str:
-        return str(self)
-
-
-class Value(Node):
-    def serialize(self) -> str:
-        return f'"{self}"'
-
-
-class Op(Node):
-    def serialize(self) -> str:
-        return str(self)
-
-
-VARIABLE = (
-    L("implementation_version")
-    | L("platform_python_implementation")
-    | L("implementation_name")
-    | L("python_full_version")
-    | L("platform_release")
-    | L("platform_version")
-    | L("platform_machine")
-    | L("platform_system")
-    | L("python_version")
-    | L("sys_platform")
-    | L("os_name")
-    | L("os.name")  # PEP-345
-    | L("sys.platform")  # PEP-345
-    | L("platform.version")  # PEP-345
-    | L("platform.machine")  # PEP-345
-    | L("platform.python_implementation")  # PEP-345
-    | L("python_implementation")  # undocumented setuptools legacy
-    | L("extra")  # PEP-508
-)
-ALIASES = {
-    "os.name": "os_name",
-    "sys.platform": "sys_platform",
-    "platform.version": "platform_version",
-    "platform.machine": "platform_machine",
-    "platform.python_implementation": "platform_python_implementation",
-    "python_implementation": "platform_python_implementation",
-}
-VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
-
-VERSION_CMP = (
-    L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<")
-)
-
-MARKER_OP = VERSION_CMP | L("not in") | L("in")
-MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))
-
-MARKER_VALUE = QuotedString("'") | QuotedString('"')
-MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
-
-BOOLOP = L("and") | L("or")
-
-MARKER_VAR = VARIABLE | MARKER_VALUE
-
-MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
-MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
-
-LPAREN = L("(").suppress()
-RPAREN = L(")").suppress()
-
-MARKER_EXPR = Forward()
-MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
-MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
-
-MARKER = stringStart + MARKER_EXPR + stringEnd
-
-
-def _coerce_parse_result(results: Union[ParseResults, List[Any]]) -> List[Any]:
-    if isinstance(results, ParseResults):
-        return [_coerce_parse_result(i) for i in results]
-    else:
-        return results
-
-
-def _format_marker(
-    marker: Union[List[str], Tuple[Node, ...], str], first: Optional[bool] = True
-) -> str:
-
-    assert isinstance(marker, (list, tuple, str))
-
-    # Sometimes we have a structure like [[...]] which is a single item list
-    # where the single item is itself its own list. In that case we want to
-    # skip the rest of this function so that we don't get extraneous () on
-    # the outside.
-    if (
-        isinstance(marker, list)
-        and len(marker) == 1
-        and isinstance(marker[0], (list, tuple))
-    ):
-        return _format_marker(marker[0])
-
-    if isinstance(marker, list):
-        inner = (_format_marker(m, first=False) for m in marker)
-        if first:
-            return " ".join(inner)
-        else:
-            return "(" + " ".join(inner) + ")"
-    elif isinstance(marker, tuple):
-        return " ".join([m.serialize() for m in marker])
-    else:
-        return marker
-
-
-_operators: Dict[str, Operator] = {
-    "in": lambda lhs, rhs: lhs in rhs,
-    "not in": lambda lhs, rhs: lhs not in rhs,
-    "<": operator.lt,
-    "<=": operator.le,
-    "==": operator.eq,
-    "!=": operator.ne,
-    ">=": operator.ge,
-    ">": operator.gt,
-}
-
-
-def _eval_op(lhs: str, op: Op, rhs: str) -> bool:
-    try:
-        spec = Specifier("".join([op.serialize(), rhs]))
-    except InvalidSpecifier:
-        pass
-    else:
-        return spec.contains(lhs)
-
-    oper: Optional[Operator] = _operators.get(op.serialize())
-    if oper is None:
-        raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.")
-
-    return oper(lhs, rhs)
-
-
-class Undefined:
-    pass
-
-
-_undefined = Undefined()
-
-
-def _get_env(environment: Dict[str, str], name: str) -> str:
-    value: Union[str, Undefined] = environment.get(name, _undefined)
-
-    if isinstance(value, Undefined):
-        raise UndefinedEnvironmentName(
-            f"{name!r} does not exist in evaluation environment."
-        )
-
-    return value
-
-
-def _evaluate_markers(markers: List[Any], environment: Dict[str, str]) -> bool:
-    groups: List[List[bool]] = [[]]
-
-    for marker in markers:
-        assert isinstance(marker, (list, tuple, str))
-
-        if isinstance(marker, list):
-            groups[-1].append(_evaluate_markers(marker, environment))
-        elif isinstance(marker, tuple):
-            lhs, op, rhs = marker
-
-            if isinstance(lhs, Variable):
-                lhs_value = _get_env(environment, lhs.value)
-                rhs_value = rhs.value
-            else:
-                lhs_value = lhs.value
-                rhs_value = _get_env(environment, rhs.value)
-
-            groups[-1].append(_eval_op(lhs_value, op, rhs_value))
-        else:
-            assert marker in ["and", "or"]
-            if marker == "or":
-                groups.append([])
-
-    return any(all(item) for item in groups)
-
-
-def format_full_version(info: "sys._version_info") -> str:
-    version = "{0.major}.{0.minor}.{0.micro}".format(info)
-    kind = info.releaselevel
-    if kind != "final":
-        version += kind[0] + str(info.serial)
-    return version
-
-
-def default_environment() -> Dict[str, str]:
-    iver = format_full_version(sys.implementation.version)
-    implementation_name = sys.implementation.name
-    return {
-        "implementation_name": implementation_name,
-        "implementation_version": iver,
-        "os_name": os.name,
-        "platform_machine": platform.machine(),
-        "platform_release": platform.release(),
-        "platform_system": platform.system(),
-        "platform_version": platform.version(),
-        "python_full_version": platform.python_version(),
-        "platform_python_implementation": platform.python_implementation(),
-        "python_version": ".".join(platform.python_version_tuple()[:2]),
-        "sys_platform": sys.platform,
-    }
-
-
-class Marker:
-    def __init__(self, marker: str) -> None:
-        try:
-            self._markers = _coerce_parse_result(MARKER.parseString(marker))
-        except ParseException as e:
-            raise InvalidMarker(
-                f"Invalid marker: {marker!r}, parse error at "
-                f"{marker[e.loc : e.loc + 8]!r}"
-            )
-
-    def __str__(self) -> str:
-        return _format_marker(self._markers)
-
-    def __repr__(self) -> str:
-        return f"<Marker('{self}')>"
-
-    def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool:
-        """Evaluate a marker.
-
-        Return the boolean from evaluating the given marker against the
-        environment. environment is an optional argument to override all or
-        part of the determined environment.
-
-        The environment is determined from the current Python process.
-        """
-        current_environment = default_environment()
-        if environment is not None:
-            current_environment.update(environment)
-
-        return _evaluate_markers(self._markers, current_environment)
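
A minimal evaluation sketch for the Marker class above (import path
assumed; output depends on the running interpreter):

    from pkg_resources._vendor.packaging.markers import Marker

    m = Marker('python_version >= "3.6" and os_name == "posix"')
    print(m.evaluate())                   # evaluated against this interpreter
    print(m.evaluate({"os_name": "nt"}))  # override part of the environment
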
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/requirements.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/requirements.py
deleted file mode 100644
index 6af14ec..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/requirements.py
+++ /dev/null
@@ -1,146 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-import re
-import string
-import urllib.parse
-from typing import List, Optional as TOptional, Set
-
-from pkg_resources.extern.pyparsing import (  # noqa
-    Combine,
-    Literal as L,
-    Optional,
-    ParseException,
-    Regex,
-    Word,
-    ZeroOrMore,
-    originalTextFor,
-    stringEnd,
-    stringStart,
-)
-
-from .markers import MARKER_EXPR, Marker
-from .specifiers import LegacySpecifier, Specifier, SpecifierSet
-
-
-class InvalidRequirement(ValueError):
-    """
-    An invalid requirement was found; users should refer to PEP 508.
-    """
-
-
-ALPHANUM = Word(string.ascii_letters + string.digits)
-
-LBRACKET = L("[").suppress()
-RBRACKET = L("]").suppress()
-LPAREN = L("(").suppress()
-RPAREN = L(")").suppress()
-COMMA = L(",").suppress()
-SEMICOLON = L(";").suppress()
-AT = L("@").suppress()
-
-PUNCTUATION = Word("-_.")
-IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
-IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))
-
-NAME = IDENTIFIER("name")
-EXTRA = IDENTIFIER
-
-URI = Regex(r"[^ ]+")("url")
-URL = AT + URI
-
-EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
-EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")
-
-VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
-VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)
-
-VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
-VERSION_MANY = Combine(
-    VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False
-)("_raw_spec")
-_VERSION_SPEC = Optional((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY)
-_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "")
-
-VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
-VERSION_SPEC.setParseAction(lambda s, l, t: t[1])
-
-MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
-MARKER_EXPR.setParseAction(
-    lambda s, l, t: Marker(s[t._original_start : t._original_end])
-)
-MARKER_SEPARATOR = SEMICOLON
-MARKER = MARKER_SEPARATOR + MARKER_EXPR
-
-VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
-URL_AND_MARKER = URL + Optional(MARKER)
-
-NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)
-
-REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
-# pkg_resources.extern.pyparsing isn't thread-safe during initialization, so
-# we trigger that initialization eagerly with a throwaway parse; see issue #104.
-REQUIREMENT.parseString("x[]")
-
-
-class Requirement:
-    """Parse a requirement.
-
-    Parse a given requirement string into its parts, such as name, specifier,
-    URL, and extras. Raises InvalidRequirement on a badly-formed requirement
-    string.
-    """
-
-    # TODO: Can we test whether something is contained within a requirement?
-    #       If so how do we do that? Do we need to test against the _name_ of
-    #       the thing as well as the version? What about the markers?
-    # TODO: Can we normalize the name and extra name?
-
-    def __init__(self, requirement_string: str) -> None:
-        try:
-            req = REQUIREMENT.parseString(requirement_string)
-        except ParseException as e:
-            raise InvalidRequirement(
-                f'Parse error at "{requirement_string[e.loc : e.loc + 8]!r}": {e.msg}'
-            )
-
-        self.name: str = req.name
-        if req.url:
-            parsed_url = urllib.parse.urlparse(req.url)
-            if parsed_url.scheme == "file":
-                if urllib.parse.urlunparse(parsed_url) != req.url:
-                    raise InvalidRequirement("Invalid URL given")
-            elif not (parsed_url.scheme and parsed_url.netloc) or (
-                not parsed_url.scheme and not parsed_url.netloc
-            ):
-                raise InvalidRequirement(f"Invalid URL: {req.url}")
-            self.url: TOptional[str] = req.url
-        else:
-            self.url = None
-        self.extras: Set[str] = set(req.extras.asList() if req.extras else [])
-        self.specifier: SpecifierSet = SpecifierSet(req.specifier)
-        self.marker: TOptional[Marker] = req.marker if req.marker else None
-
-    def __str__(self) -> str:
-        parts: List[str] = [self.name]
-
-        if self.extras:
-            formatted_extras = ",".join(sorted(self.extras))
-            parts.append(f"[{formatted_extras}]")
-
-        if self.specifier:
-            parts.append(str(self.specifier))
-
-        if self.url:
-            parts.append(f"@ {self.url}")
-            if self.marker:
-                parts.append(" ")
-
-        if self.marker:
-            parts.append(f"; {self.marker}")
-
-        return "".join(parts)
-
-    def __repr__(self) -> str:
-        return f"<Requirement('{self}')>"
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/specifiers.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/specifiers.py
deleted file mode 100644
index 0e218a6..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/specifiers.py
+++ /dev/null
@@ -1,802 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-import abc
-import functools
-import itertools
-import re
-import warnings
-from typing import (
-    Callable,
-    Dict,
-    Iterable,
-    Iterator,
-    List,
-    Optional,
-    Pattern,
-    Set,
-    Tuple,
-    TypeVar,
-    Union,
-)
-
-from .utils import canonicalize_version
-from .version import LegacyVersion, Version, parse
-
-ParsedVersion = Union[Version, LegacyVersion]
-UnparsedVersion = Union[Version, LegacyVersion, str]
-VersionTypeVar = TypeVar("VersionTypeVar", bound=UnparsedVersion)
-CallableOperator = Callable[[ParsedVersion, str], bool]
-
-
-class InvalidSpecifier(ValueError):
-    """
-    An invalid specifier was found; users should refer to PEP 440.
-    """
-
-
-class BaseSpecifier(metaclass=abc.ABCMeta):
-    @abc.abstractmethod
-    def __str__(self) -> str:
-        """
-        Returns the str representation of this Specifier-like object. This
-        should be representative of the Specifier itself.
-        """
-
-    @abc.abstractmethod
-    def __hash__(self) -> int:
-        """
-        Returns a hash value for this Specifier-like object.
-        """
-
-    @abc.abstractmethod
-    def __eq__(self, other: object) -> bool:
-        """
-        Returns a boolean representing whether or not the two Specifier-like
-        objects are equal.
-        """
-
-    @abc.abstractproperty
-    def prereleases(self) -> Optional[bool]:
-        """
-        Returns whether or not pre-releases as a whole are allowed by this
-        specifier.
-        """
-
-    @prereleases.setter
-    def prereleases(self, value: bool) -> None:
-        """
-        Sets whether or not pre-releases as a whole are allowed by this
-        specifier.
-        """
-
-    @abc.abstractmethod
-    def contains(self, item: str, prereleases: Optional[bool] = None) -> bool:
-        """
-        Determines if the given item is contained within this specifier.
-        """
-
-    @abc.abstractmethod
-    def filter(
-        self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
-    ) -> Iterable[VersionTypeVar]:
-        """
-        Takes an iterable of items and filters them so that only items which
-        are contained within this specifier remain.
-        """
-
-
-class _IndividualSpecifier(BaseSpecifier):
-
-    _operators: Dict[str, str] = {}
-    _regex: Pattern[str]
-
-    def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
-        match = self._regex.search(spec)
-        if not match:
-            raise InvalidSpecifier(f"Invalid specifier: '{spec}'")
-
-        self._spec: Tuple[str, str] = (
-            match.group("operator").strip(),
-            match.group("version").strip(),
-        )
-
-        # Store whether or not this Specifier should accept prereleases
-        self._prereleases = prereleases
-
-    def __repr__(self) -> str:
-        pre = (
-            f", prereleases={self.prereleases!r}"
-            if self._prereleases is not None
-            else ""
-        )
-
-        return f"<{self.__class__.__name__}({str(self)!r}{pre})>"
-
-    def __str__(self) -> str:
-        return "{}{}".format(*self._spec)
-
-    @property
-    def _canonical_spec(self) -> Tuple[str, str]:
-        return self._spec[0], canonicalize_version(self._spec[1])
-
-    def __hash__(self) -> int:
-        return hash(self._canonical_spec)
-
-    def __eq__(self, other: object) -> bool:
-        if isinstance(other, str):
-            try:
-                other = self.__class__(str(other))
-            except InvalidSpecifier:
-                return NotImplemented
-        elif not isinstance(other, self.__class__):
-            return NotImplemented
-
-        return self._canonical_spec == other._canonical_spec
-
-    def _get_operator(self, op: str) -> CallableOperator:
-        operator_callable: CallableOperator = getattr(
-            self, f"_compare_{self._operators[op]}"
-        )
-        return operator_callable
-
-    def _coerce_version(self, version: UnparsedVersion) -> ParsedVersion:
-        if not isinstance(version, (LegacyVersion, Version)):
-            version = parse(version)
-        return version
-
-    @property
-    def operator(self) -> str:
-        return self._spec[0]
-
-    @property
-    def version(self) -> str:
-        return self._spec[1]
-
-    @property
-    def prereleases(self) -> Optional[bool]:
-        return self._prereleases
-
-    @prereleases.setter
-    def prereleases(self, value: bool) -> None:
-        self._prereleases = value
-
-    def __contains__(self, item: str) -> bool:
-        return self.contains(item)
-
-    def contains(
-        self, item: UnparsedVersion, prereleases: Optional[bool] = None
-    ) -> bool:
-
-        # Determine if prereleases are to be allowed or not.
-        if prereleases is None:
-            prereleases = self.prereleases
-
-        # Normalize item to a Version or LegacyVersion; this allows us to have
-        # a shortcut for ``"2.0" in Specifier(">=2")``.
-        normalized_item = self._coerce_version(item)
-
-        # Determine if we should be supporting prereleases in this specifier
-        # or not. If we do not support prereleases, we can short-circuit the
-        # logic when this version is a prerelease.
-        if normalized_item.is_prerelease and not prereleases:
-            return False
-
-        # Actually do the comparison to determine if this item is contained
-        # within this Specifier or not.
-        operator_callable: CallableOperator = self._get_operator(self.operator)
-        return operator_callable(normalized_item, self.version)
-
-    def filter(
-        self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
-    ) -> Iterable[VersionTypeVar]:
-
-        yielded = False
-        found_prereleases = []
-
-        kw = {"prereleases": prereleases if prereleases is not None else True}
-
-        # Attempt to iterate over all the values in the iterable and if any of
-        # them match, yield them.
-        for version in iterable:
-            parsed_version = self._coerce_version(version)
-
-            if self.contains(parsed_version, **kw):
-                # If our version is a prerelease, and we were not set to allow
-                # prereleases, then we'll store it for later in case nothing
-                # else matches this specifier.
-                if parsed_version.is_prerelease and not (
-                    prereleases or self.prereleases
-                ):
-                    found_prereleases.append(version)
-                # Either this is not a prerelease, or we should have been
-                # accepting prereleases from the beginning.
-                else:
-                    yielded = True
-                    yield version
-
-        # Now that we've iterated over everything, determine if we've yielded
-        # any values, and if we have not and we have any prereleases stored up
-        # then we will go ahead and yield the prereleases.
-        if not yielded and found_prereleases:
-            for version in found_prereleases:
-                yield version
-
-
-class LegacySpecifier(_IndividualSpecifier):
-
-    _regex_str = r"""
-        (?P<operator>(==|!=|<=|>=|<|>))
-        \s*
-        (?P<version>
-            [^,;\s)]* # Since this is a "legacy" specifier, and the version
-                      # string can be just about anything, we match everything
-                      # except for whitespace, a semi-colon for marker support,
-                      # a closing paren since versions can be enclosed in
-                      # them, and a comma since it's a version separator.
-        )
-        """
-
-    _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
-
-    _operators = {
-        "==": "equal",
-        "!=": "not_equal",
-        "<=": "less_than_equal",
-        ">=": "greater_than_equal",
-        "<": "less_than",
-        ">": "greater_than",
-    }
-
-    def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
-        super().__init__(spec, prereleases)
-
-        warnings.warn(
-            "Creating a LegacyVersion has been deprecated and will be "
-            "removed in the next major release",
-            DeprecationWarning,
-        )
-
-    def _coerce_version(self, version: UnparsedVersion) -> LegacyVersion:
-        if not isinstance(version, LegacyVersion):
-            version = LegacyVersion(str(version))
-        return version
-
-    def _compare_equal(self, prospective: LegacyVersion, spec: str) -> bool:
-        return prospective == self._coerce_version(spec)
-
-    def _compare_not_equal(self, prospective: LegacyVersion, spec: str) -> bool:
-        return prospective != self._coerce_version(spec)
-
-    def _compare_less_than_equal(self, prospective: LegacyVersion, spec: str) -> bool:
-        return prospective <= self._coerce_version(spec)
-
-    def _compare_greater_than_equal(
-        self, prospective: LegacyVersion, spec: str
-    ) -> bool:
-        return prospective >= self._coerce_version(spec)
-
-    def _compare_less_than(self, prospective: LegacyVersion, spec: str) -> bool:
-        return prospective < self._coerce_version(spec)
-
-    def _compare_greater_than(self, prospective: LegacyVersion, spec: str) -> bool:
-        return prospective > self._coerce_version(spec)
-
-
-def _require_version_compare(
-    fn: Callable[["Specifier", ParsedVersion, str], bool]
-) -> Callable[["Specifier", ParsedVersion, str], bool]:
-    @functools.wraps(fn)
-    def wrapped(self: "Specifier", prospective: ParsedVersion, spec: str) -> bool:
-        if not isinstance(prospective, Version):
-            return False
-        return fn(self, prospective, spec)
-
-    return wrapped
-
-
-class Specifier(_IndividualSpecifier):
-
-    _regex_str = r"""
-        (?P<operator>(~=|==|!=|<=|>=|<|>|===))
-        (?P<version>
-            (?:
-                # The identity operators allow for an escape hatch that will
-                # do an exact string match of the version you wish to install.
-                # This will not be parsed by PEP 440 and we cannot determine
-                # any semantic meaning from it. This operator is discouraged
-                # but included entirely as an escape hatch.
-                (?<====)  # Only match for the identity operator
-                \s*
-                [^\s]*    # We just match everything, except for whitespace
-                          # since we are only testing for strict identity.
-            )
-            |
-            (?:
-                # The (non)equality operators allow for wild card and local
-                # versions to be specified so we have to define these two
-                # operators separately to enable that.
-                (?<===|!=)            # Only match for equals and not equals
-
-                \s*
-                v?
-                (?:[0-9]+!)?          # epoch
-                [0-9]+(?:\.[0-9]+)*   # release
-                (?:                   # pre release
-                    [-_\.]?
-                    (a|b|c|rc|alpha|beta|pre|preview)
-                    [-_\.]?
-                    [0-9]*
-                )?
-                (?:                   # post release
-                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
-                )?
-
-                # You cannot use a wild card and a dev or local version
-                # together so group them with a | and make them optional.
-                (?:
-                    (?:[-_\.]?dev[-_\.]?[0-9]*)?         # dev release
-                    (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
-                    |
-                    \.\*  # Wild card syntax of .*
-                )?
-            )
-            |
-            (?:
-                # The compatible operator requires at least two digits in the
-                # release segment.
-                (?<=~=)               # Only match for the compatible operator
-
-                \s*
-                v?
-                (?:[0-9]+!)?          # epoch
-                [0-9]+(?:\.[0-9]+)+   # release  (We have a + instead of a *)
-                (?:                   # pre release
-                    [-_\.]?
-                    (a|b|c|rc|alpha|beta|pre|preview)
-                    [-_\.]?
-                    [0-9]*
-                )?
-                (?:                                   # post release
-                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
-                )?
-                (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
-            )
-            |
-            (?:
-                # All other operators only allow a subset of what the
-                # (non)equality operators do. Specifically they do not allow
-                # local versions to be specified nor do they allow the prefix
-                # matching wild cards.
-                (?<!==|!=|~=)         # We have special cases for these
-                                      # operators so we want to make sure they
-                                      # don't match here.
-
-                \s*
-                v?
-                (?:[0-9]+!)?          # epoch
-                [0-9]+(?:\.[0-9]+)*   # release
-                (?:                   # pre release
-                    [-_\.]?
-                    (a|b|c|rc|alpha|beta|pre|preview)
-                    [-_\.]?
-                    [0-9]*
-                )?
-                (?:                                   # post release
-                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
-                )?
-                (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
-            )
-        )
-        """
-
-    _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
-
-    _operators = {
-        "~=": "compatible",
-        "==": "equal",
-        "!=": "not_equal",
-        "<=": "less_than_equal",
-        ">=": "greater_than_equal",
-        "<": "less_than",
-        ">": "greater_than",
-        "===": "arbitrary",
-    }
-
-    @_require_version_compare
-    def _compare_compatible(self, prospective: ParsedVersion, spec: str) -> bool:
-
-        # Compatible releases have an equivalent combination of >= and ==. That
-        # is, ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
-        # implement this in terms of the other specifiers instead of
-        # implementing it ourselves. The only thing we need to do is construct
-        # the other specifiers.
-
-        # We want everything but the last item in the version, but we want to
-        # ignore suffix segments.
-        prefix = ".".join(
-            list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1]
-        )
-
-        # Add the prefix notation to the end of our string
-        prefix += ".*"
-
-        return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
-            prospective, prefix
-        )
-
-    @_require_version_compare
-    def _compare_equal(self, prospective: ParsedVersion, spec: str) -> bool:
-
-        # We need special logic to handle prefix matching
-        if spec.endswith(".*"):
-            # In the case of prefix matching we want to ignore local segment.
-            prospective = Version(prospective.public)
-            # Split the spec out by dots, and pretend that there is an implicit
-            # dot in between a release segment and a pre-release segment.
-            split_spec = _version_split(spec[:-2])  # Remove the trailing .*
-
-            # Split the prospective version out by dots, and pretend that there
-            # is an implicit dot in between a release segment and a pre-release
-            # segment.
-            split_prospective = _version_split(str(prospective))
-
-            # Shorten the prospective version to be the same length as the spec
-            # so that we can determine if the specifier is a prefix of the
-            # prospective version or not.
-            shortened_prospective = split_prospective[: len(split_spec)]
-
-            # Pad out our two sides with zeros so that they both equal the same
-            # length.
-            padded_spec, padded_prospective = _pad_version(
-                split_spec, shortened_prospective
-            )
-
-            return padded_prospective == padded_spec
-        else:
-            # Convert our spec string into a Version
-            spec_version = Version(spec)
-
-            # If the specifier does not have a local segment, then we want to
-            # act as if the prospective version also does not have a local
-            # segment.
-            if not spec_version.local:
-                prospective = Version(prospective.public)
-
-            return prospective == spec_version
-
-    @_require_version_compare
-    def _compare_not_equal(self, prospective: ParsedVersion, spec: str) -> bool:
-        return not self._compare_equal(prospective, spec)
-
-    @_require_version_compare
-    def _compare_less_than_equal(self, prospective: ParsedVersion, spec: str) -> bool:
-
-        # NB: Local version identifiers are NOT permitted in the version
-        # specifier, so local version labels can be universally removed from
-        # the prospective version.
-        return Version(prospective.public) <= Version(spec)
-
-    @_require_version_compare
-    def _compare_greater_than_equal(
-        self, prospective: ParsedVersion, spec: str
-    ) -> bool:
-
-        # NB: Local version identifiers are NOT permitted in the version
-        # specifier, so local version labels can be universally removed from
-        # the prospective version.
-        return Version(prospective.public) >= Version(spec)
-
-    @_require_version_compare
-    def _compare_less_than(self, prospective: ParsedVersion, spec_str: str) -> bool:
-
-        # Convert our spec to a Version instance, since we'll want to work with
-        # it as a version.
-        spec = Version(spec_str)
-
-        # Check to see if the prospective version is less than the spec
-        # version. If it's not we can short circuit and just return False now
-        # instead of doing extra unneeded work.
-        if not prospective < spec:
-            return False
-
-        # This special case is here so that, unless the specifier itself
-        # includes a pre-release version, we do not accept pre-release
-        # versions for the version mentioned in the specifier (e.g. <3.1 should
-        # not match 3.1.dev0, but should match 3.0.dev0).
-        if not spec.is_prerelease and prospective.is_prerelease:
-            if Version(prospective.base_version) == Version(spec.base_version):
-                return False
-
-        # If we've gotten to here, it means that the prospective version is
-        # both less than the spec version *and* not a pre-release of the same
-        # version in the spec.
-        return True
-
-    @_require_version_compare
-    def _compare_greater_than(self, prospective: ParsedVersion, spec_str: str) -> bool:
-
-        # Convert our spec to a Version instance, since we'll want to work with
-        # it as a version.
-        spec = Version(spec_str)
-
-        # Check to see if the prospective version is greater than the spec
-        # version. If it's not we can short circuit and just return False now
-        # instead of doing extra unneeded work.
-        if not prospective > spec:
-            return False
-
-        # This special case is here so that, unless the specifier itself
-        # includes a post-release version, we do not accept
-        # post-release versions for the version mentioned in the specifier
-        # (e.g. >3.1 should not match 3.1.post0, but should match 3.2.post0).
-        if not spec.is_postrelease and prospective.is_postrelease:
-            if Version(prospective.base_version) == Version(spec.base_version):
-                return False
-
-        # Ensure that we do not allow a local version of the version mentioned
-        # in the specifier, which is technically greater than, to match.
-        if prospective.local is not None:
-            if Version(prospective.base_version) == Version(spec.base_version):
-                return False
-
-        # If we've gotten to here, it means that the prospective version is
-        # both greater than the spec version *and* neither a post-release nor
-        # a local version of the same version in the spec.
-        return True
-
-    def _compare_arbitrary(self, prospective: Version, spec: str) -> bool:
-        return str(prospective).lower() == str(spec).lower()
-
-    @property
-    def prereleases(self) -> bool:
-
-        # If there is an explicit prereleases set for this, then we'll just
-        # blindly use that.
-        if self._prereleases is not None:
-            return self._prereleases
-
-        # Look at the operator of this specifier and determine if it is an
-        # inclusive operator, and if so whether it pins an explicit
-        # prerelease.
-        operator, version = self._spec
-        if operator in ["==", ">=", "<=", "~=", "==="]:
-            # The == specifier can include a trailing .*; if it does, we
-            # want to remove it before parsing.
-            if operator == "==" and version.endswith(".*"):
-                version = version[:-2]
-
-            # Parse the version, and if it is a pre-release then this
-            # specifier allows pre-releases.
-            if parse(version).is_prerelease:
-                return True
-
-        return False
-
-    @prereleases.setter
-    def prereleases(self, value: bool) -> None:
-        self._prereleases = value
-
-
-_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
-
-
-def _version_split(version: str) -> List[str]:
-    result: List[str] = []
-    for item in version.split("."):
-        match = _prefix_regex.search(item)
-        if match:
-            result.extend(match.groups())
-        else:
-            result.append(item)
-    return result
-
-
-def _is_not_suffix(segment: str) -> bool:
-    return not any(
-        segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post")
-    )
-
-
-def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]:
-    left_split, right_split = [], []
-
-    # Get the release segment of our versions
-    left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
-    right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
-
-    # Get the rest of our versions
-    left_split.append(left[len(left_split[0]) :])
-    right_split.append(right[len(right_split[0]) :])
-
-    # Insert our padding
-    left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
-    right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
-
-    return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split)))
-
-
-class SpecifierSet(BaseSpecifier):
-    def __init__(
-        self, specifiers: str = "", prereleases: Optional[bool] = None
-    ) -> None:
-
-        # Split on , to break each individual specifier into its own item, and
-        # strip each item to remove leading/trailing whitespace.
-        split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
-
-        # Parse each individual specifier, attempting first to make it a
-        # Specifier and falling back to a LegacySpecifier.
-        parsed: Set[_IndividualSpecifier] = set()
-        for specifier in split_specifiers:
-            try:
-                parsed.add(Specifier(specifier))
-            except InvalidSpecifier:
-                parsed.add(LegacySpecifier(specifier))
-
-        # Turn our parsed specifiers into a frozen set and save them for later.
-        self._specs = frozenset(parsed)
-
-        # Store our prereleases value so we can use it later to determine if
-        # we accept prereleases or not.
-        self._prereleases = prereleases
-
-    def __repr__(self) -> str:
-        pre = (
-            f", prereleases={self.prereleases!r}"
-            if self._prereleases is not None
-            else ""
-        )
-
-        return f"<SpecifierSet({str(self)!r}{pre})>"
-
-    def __str__(self) -> str:
-        return ",".join(sorted(str(s) for s in self._specs))
-
-    def __hash__(self) -> int:
-        return hash(self._specs)
-
-    def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet":
-        if isinstance(other, str):
-            other = SpecifierSet(other)
-        elif not isinstance(other, SpecifierSet):
-            return NotImplemented
-
-        specifier = SpecifierSet()
-        specifier._specs = frozenset(self._specs | other._specs)
-
-        if self._prereleases is None and other._prereleases is not None:
-            specifier._prereleases = other._prereleases
-        elif self._prereleases is not None and other._prereleases is None:
-            specifier._prereleases = self._prereleases
-        elif self._prereleases == other._prereleases:
-            specifier._prereleases = self._prereleases
-        else:
-            raise ValueError(
-                "Cannot combine SpecifierSets with True and False prerelease "
-                "overrides."
-            )
-
-        return specifier
-
-    def __eq__(self, other: object) -> bool:
-        if isinstance(other, (str, _IndividualSpecifier)):
-            other = SpecifierSet(str(other))
-        elif not isinstance(other, SpecifierSet):
-            return NotImplemented
-
-        return self._specs == other._specs
-
-    def __len__(self) -> int:
-        return len(self._specs)
-
-    def __iter__(self) -> Iterator[_IndividualSpecifier]:
-        return iter(self._specs)
-
-    @property
-    def prereleases(self) -> Optional[bool]:
-
-        # If we have been given an explicit prerelease modifier, then we'll
-        # pass that through here.
-        if self._prereleases is not None:
-            return self._prereleases
-
-        # If we don't have any specifiers, and we don't have a forced value,
-        # then we'll just return None since we don't know if this should have
-        # pre-releases or not.
-        if not self._specs:
-            return None
-
-        # Otherwise we'll see if any of the given specifiers accept
-        # prereleases, if any of them do we'll return True, otherwise False.
-        return any(s.prereleases for s in self._specs)
-
-    @prereleases.setter
-    def prereleases(self, value: bool) -> None:
-        self._prereleases = value
-
-    def __contains__(self, item: UnparsedVersion) -> bool:
-        return self.contains(item)
-
-    def contains(
-        self, item: UnparsedVersion, prereleases: Optional[bool] = None
-    ) -> bool:
-
-        # Ensure that our item is a Version or LegacyVersion instance.
-        if not isinstance(item, (LegacyVersion, Version)):
-            item = parse(item)
-
-        # Determine if we're forcing a prerelease or not; if we're not forcing
-        # one for this particular call, then we'll use whatever the
-        # SpecifierSet thinks for whether or not we should support prereleases.
-        if prereleases is None:
-            prereleases = self.prereleases
-
-        # We can determine if we're going to allow pre-releases by looking to
-        # see if any of the underlying items supports them. If none of them do
-        # and this item is a pre-release then we do not allow it and we can
-        # short circuit that here.
-        # Note: This means that 1.0.dev1 would not be contained in something
-        #       like >=1.0.devabc, however it would be in >=1.0.devabc,>0.0.dev0
-        if not prereleases and item.is_prerelease:
-            return False
-
-        # We simply dispatch to the underlying specs here to make sure that the
-        # given version is contained within all of them.
-        # Note: This use of all() here means that an empty set of specifiers
-        #       will always return True, this is an explicit design decision.
-        return all(s.contains(item, prereleases=prereleases) for s in self._specs)
-
-    def filter(
-        self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
-    ) -> Iterable[VersionTypeVar]:
-
-        # Determine if we're forcing a prerelease or not; if we're not forcing
-        # one for this particular filter call, then we'll use whatever the
-        # SpecifierSet thinks for whether or not we should support prereleases.
-        if prereleases is None:
-            prereleases = self.prereleases
-
-        # If we have any specifiers, then we want to wrap our iterable in the
-        # filter method for each one; this acts as a logical AND across the
-        # specifiers.
-        if self._specs:
-            for spec in self._specs:
-                iterable = spec.filter(iterable, prereleases=bool(prereleases))
-            return iterable
-        # If we do not have any specifiers, then we need to have a rough filter
-        # which will filter out any pre-releases, unless there are no final
-        # releases, and which will filter out LegacyVersion in general.
-        else:
-            filtered: List[VersionTypeVar] = []
-            found_prereleases: List[VersionTypeVar] = []
-
-            item: UnparsedVersion
-            parsed_version: Union[Version, LegacyVersion]
-
-            for item in iterable:
-                # Ensure that we have some kind of Version class for this item.
-                if not isinstance(item, (LegacyVersion, Version)):
-                    parsed_version = parse(item)
-                else:
-                    parsed_version = item
-
-                # Filter out any item which is parsed as a LegacyVersion
-                if isinstance(parsed_version, LegacyVersion):
-                    continue
-
-                # Store any item which is a pre-release for later unless we've
-                # already found a final version or we are accepting prereleases
-                if parsed_version.is_prerelease and not prereleases:
-                    if not filtered:
-                        found_prereleases.append(item)
-                else:
-                    filtered.append(item)
-
-            # If we've found no items except for pre-releases, then we'll go
-            # ahead and use the pre-releases
-            if not filtered and found_prereleases and prereleases is None:
-                return found_prereleases
-
-            return filtered
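-
-
-# Editor's sketch (illustrative addition, not part of the vendored file):
-# contains() excludes pre-releases unless a specifier (or the caller) opts in,
-# and filter() applies every specifier as a logical AND, preserving order.
-def _demo_specifier_set() -> None:
-    specs = SpecifierSet(">=1.0,<2.0")
-    assert "1.5" in specs and "1.8a1" not in specs
-    assert list(specs.filter(["0.9", "1.5", "1.8a1"])) == ["1.5"]
-    assert list(specs.filter(["1.5", "1.8a1"], prereleases=True)) == ["1.5", "1.8a1"]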
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/tags.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/tags.py
deleted file mode 100644
index 9a3d25a..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/tags.py
+++ /dev/null
@@ -1,487 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-import logging
-import platform
-import sys
-import sysconfig
-from importlib.machinery import EXTENSION_SUFFIXES
-from typing import (
-    Dict,
-    FrozenSet,
-    Iterable,
-    Iterator,
-    List,
-    Optional,
-    Sequence,
-    Tuple,
-    Union,
-    cast,
-)
-
-from . import _manylinux, _musllinux
-
-logger = logging.getLogger(__name__)
-
-PythonVersion = Sequence[int]
-MacVersion = Tuple[int, int]
-
-INTERPRETER_SHORT_NAMES: Dict[str, str] = {
-    "python": "py",  # Generic.
-    "cpython": "cp",
-    "pypy": "pp",
-    "ironpython": "ip",
-    "jython": "jy",
-}
-
-
-_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32
-
-
-class Tag:
-    """
-    A representation of the tag triple for a wheel.
-
-    Instances are considered immutable and thus are hashable. Equality checking
-    is also supported.
-    """
-
-    __slots__ = ["_interpreter", "_abi", "_platform", "_hash"]
-
-    def __init__(self, interpreter: str, abi: str, platform: str) -> None:
-        self._interpreter = interpreter.lower()
-        self._abi = abi.lower()
-        self._platform = platform.lower()
-        # The __hash__ of every single element in a Set[Tag] will be evaluated each time
-        # that a set calls its `.isdisjoint()` method, which may be called hundreds of
-        # times when scanning a page of links for packages with tags matching that
-        # Set[Tag]. Pre-computing the value here produces significant speedups for
-        # downstream consumers.
-        self._hash = hash((self._interpreter, self._abi, self._platform))
-
-    @property
-    def interpreter(self) -> str:
-        return self._interpreter
-
-    @property
-    def abi(self) -> str:
-        return self._abi
-
-    @property
-    def platform(self) -> str:
-        return self._platform
-
-    def __eq__(self, other: object) -> bool:
-        if not isinstance(other, Tag):
-            return NotImplemented
-
-        return (
-            (self._hash == other._hash)  # Short-circuit ASAP for perf reasons.
-            and (self._platform == other._platform)
-            and (self._abi == other._abi)
-            and (self._interpreter == other._interpreter)
-        )
-
-    def __hash__(self) -> int:
-        return self._hash
-
-    def __str__(self) -> str:
-        return f"{self._interpreter}-{self._abi}-{self._platform}"
-
-    def __repr__(self) -> str:
-        return f"<{self} @ {id(self)}>"
-
-
-def parse_tag(tag: str) -> FrozenSet[Tag]:
-    """
-    Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances.
-
-    Returning a set is required due to the possibility that the tag is a
-    compressed tag set.
-    """
-    tags = set()
-    interpreters, abis, platforms = tag.split("-")
-    for interpreter in interpreters.split("."):
-        for abi in abis.split("."):
-            for platform_ in platforms.split("."):
-                tags.add(Tag(interpreter, abi, platform_))
-    return frozenset(tags)
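-
-
-# Editor's sketch (illustrative addition, not part of the vendored file): a
-# compressed tag set expands to the cross product of its dotted components.
-def _demo_parse_tag() -> None:
-    expanded = parse_tag("py2.py3-none-any")
-    assert {str(t) for t in expanded} == {"py2-none-any", "py3-none-any"}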
-
-
-def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]:
-    value = sysconfig.get_config_var(name)
-    if value is None and warn:
-        logger.debug(
-            "Config variable '%s' is unset, Python ABI tag may be incorrect", name
-        )
-    return value
-
-
-def _normalize_string(string: str) -> str:
-    return string.replace(".", "_").replace("-", "_")
-
-
-def _abi3_applies(python_version: PythonVersion) -> bool:
-    """
-    Determine if the Python version supports abi3.
-
-    PEP 384 was first implemented in Python 3.2.
-    """
-    return len(python_version) > 1 and tuple(python_version) >= (3, 2)
-
-
-def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]:
-    py_version = tuple(py_version)  # To allow for version comparison.
-    abis = []
-    version = _version_nodot(py_version[:2])
-    debug = pymalloc = ucs4 = ""
-    with_debug = _get_config_var("Py_DEBUG", warn)
-    has_refcount = hasattr(sys, "gettotalrefcount")
-    # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled
-    # extension modules is the best option.
-    # https://github.com/pypa/pip/issues/3383#issuecomment-173267692
-    has_ext = "_d.pyd" in EXTENSION_SUFFIXES
-    if with_debug or (with_debug is None and (has_refcount or has_ext)):
-        debug = "d"
-    if py_version < (3, 8):
-        with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)
-        if with_pymalloc or with_pymalloc is None:
-            pymalloc = "m"
-        if py_version < (3, 3):
-            unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)
-            if unicode_size == 4 or (
-                unicode_size is None and sys.maxunicode == 0x10FFFF
-            ):
-                ucs4 = "u"
-    elif debug:
-        # Debug builds can also load "normal" extension modules.
-        # We can also assume no UCS-4 or pymalloc requirement.
-        abis.append(f"cp{version}")
-    abis.insert(
-        0,
-        "cp{version}{debug}{pymalloc}{ucs4}".format(
-            version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4
-        ),
-    )
-    return abis
-
-
-def cpython_tags(
-    python_version: Optional[PythonVersion] = None,
-    abis: Optional[Iterable[str]] = None,
-    platforms: Optional[Iterable[str]] = None,
-    *,
-    warn: bool = False,
-) -> Iterator[Tag]:
-    """
-    Yields the tags for a CPython interpreter.
-
-    The tags consist of:
-    - cp<python_version>-<abi>-<platform>
-    - cp<python_version>-abi3-<platform>
-    - cp<python_version>-none-<platform>
-    - cp<less than python_version>-abi3-<platform>  # Older Python versions down to 3.2.
-
-    If python_version only specifies a major version, then user-provided ABIs and
-    the 'none' ABI tag will be used.
-
-    If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
-    their normal position and not at the beginning.
-    """
-    if not python_version:
-        python_version = sys.version_info[:2]
-
-    interpreter = f"cp{_version_nodot(python_version[:2])}"
-
-    if abis is None:
-        if len(python_version) > 1:
-            abis = _cpython_abis(python_version, warn)
-        else:
-            abis = []
-    abis = list(abis)
-    # 'abi3' and 'none' are explicitly handled later.
-    for explicit_abi in ("abi3", "none"):
-        try:
-            abis.remove(explicit_abi)
-        except ValueError:
-            pass
-
-    platforms = list(platforms or platform_tags())
-    for abi in abis:
-        for platform_ in platforms:
-            yield Tag(interpreter, abi, platform_)
-    if _abi3_applies(python_version):
-        yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms)
-    yield from (Tag(interpreter, "none", platform_) for platform_ in platforms)
-
-    if _abi3_applies(python_version):
-        for minor_version in range(python_version[1] - 1, 1, -1):
-            for platform_ in platforms:
-                interpreter = "cp{version}".format(
-                    version=_version_nodot((python_version[0], minor_version))
-                )
-                yield Tag(interpreter, "abi3", platform_)
-
-
-def _generic_abi() -> Iterator[str]:
-    abi = sysconfig.get_config_var("SOABI")
-    if abi:
-        yield _normalize_string(abi)
-
-
-def generic_tags(
-    interpreter: Optional[str] = None,
-    abis: Optional[Iterable[str]] = None,
-    platforms: Optional[Iterable[str]] = None,
-    *,
-    warn: bool = False,
-) -> Iterator[Tag]:
-    """
-    Yields the tags for a generic interpreter.
-
-    The tags consist of:
-    - <interpreter>-<abi>-<platform>
-
-    The "none" ABI will be added if it was not explicitly provided.
-    """
-    if not interpreter:
-        interp_name = interpreter_name()
-        interp_version = interpreter_version(warn=warn)
-        interpreter = "".join([interp_name, interp_version])
-    if abis is None:
-        abis = _generic_abi()
-    platforms = list(platforms or platform_tags())
-    abis = list(abis)
-    if "none" not in abis:
-        abis.append("none")
-    for abi in abis:
-        for platform_ in platforms:
-            yield Tag(interpreter, abi, platform_)
-
-
-def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]:
-    """
-    Yields Python versions in descending order.
-
-    After the latest version, the major-only version will be yielded, and then
-    all previous versions of that major version.
-    """
-    if len(py_version) > 1:
-        yield f"py{_version_nodot(py_version[:2])}"
-    yield f"py{py_version[0]}"
-    if len(py_version) > 1:
-        for minor in range(py_version[1] - 1, -1, -1):
-            yield f"py{_version_nodot((py_version[0], minor))}"
-
-
-def compatible_tags(
-    python_version: Optional[PythonVersion] = None,
-    interpreter: Optional[str] = None,
-    platforms: Optional[Iterable[str]] = None,
-) -> Iterator[Tag]:
-    """
-    Yields the sequence of tags that are compatible with a specific version of Python.
-
-    The tags consist of:
-    - py*-none-<platform>
-    - <interpreter>-none-any  # ... if `interpreter` is provided.
-    - py*-none-any
-    """
-    if not python_version:
-        python_version = sys.version_info[:2]
-    platforms = list(platforms or platform_tags())
-    for version in _py_interpreter_range(python_version):
-        for platform_ in platforms:
-            yield Tag(version, "none", platform_)
-    if interpreter:
-        yield Tag(interpreter, "none", "any")
-    for version in _py_interpreter_range(python_version):
-        yield Tag(version, "none", "any")
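-
-
-# Editor's sketch (illustrative addition, not part of the vendored file):
-# platform-specific "py*" tags come first, then the interpreter fallback,
-# then the universal "py*-none-any" tags.
-def _demo_compatible_tags() -> None:
-    tags = [str(t) for t in compatible_tags((3, 9), "cp39", ["win_amd64"])]
-    assert tags[0] == "py39-none-win_amd64"
-    assert "cp39-none-any" in tags
-    assert tags[-1] == "py30-none-any"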
-
-
-def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str:
-    if not is_32bit:
-        return arch
-
-    if arch.startswith("ppc"):
-        return "ppc"
-
-    return "i386"
-
-
-def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> List[str]:
-    formats = [cpu_arch]
-    if cpu_arch == "x86_64":
-        if version < (10, 4):
-            return []
-        formats.extend(["intel", "fat64", "fat32"])
-
-    elif cpu_arch == "i386":
-        if version < (10, 4):
-            return []
-        formats.extend(["intel", "fat32", "fat"])
-
-    elif cpu_arch == "ppc64":
-        # TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
-        if version > (10, 5) or version < (10, 4):
-            return []
-        formats.append("fat64")
-
-    elif cpu_arch == "ppc":
-        if version > (10, 6):
-            return []
-        formats.extend(["fat32", "fat"])
-
-    if cpu_arch in {"arm64", "x86_64"}:
-        formats.append("universal2")
-
-    if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}:
-        formats.append("universal")
-
-    return formats
-
-
-def mac_platforms(
-    version: Optional[MacVersion] = None, arch: Optional[str] = None
-) -> Iterator[str]:
-    """
-    Yields the platform tags for a macOS system.
-
-    The `version` parameter is a two-item tuple specifying the macOS version to
-    generate platform tags for. The `arch` parameter is the CPU architecture to
-    generate platform tags for. Both parameters default to the appropriate value
-    for the current system.
-    """
-    version_str, _, cpu_arch = platform.mac_ver()
-    if version is None:
-        version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
-    if arch is None:
-        arch = _mac_arch(cpu_arch)
-
-    if (10, 0) <= version and version < (11, 0):
-        # Prior to Mac OS 11, each yearly release of Mac OS bumped the
-        # "minor" version number.  The major version was always 10.
-        for minor_version in range(version[1], -1, -1):
-            compat_version = 10, minor_version
-            binary_formats = _mac_binary_formats(compat_version, arch)
-            for binary_format in binary_formats:
-                yield "macosx_{major}_{minor}_{binary_format}".format(
-                    major=10, minor=minor_version, binary_format=binary_format
-                )
-
-    if version >= (11, 0):
-        # Starting with Mac OS 11, each yearly release bumps the major version
-        # number. The minor versions are now the midyear updates.
-        for major_version in range(version[0], 10, -1):
-            compat_version = major_version, 0
-            binary_formats = _mac_binary_formats(compat_version, arch)
-            for binary_format in binary_formats:
-                yield "macosx_{major}_{minor}_{binary_format}".format(
-                    major=major_version, minor=0, binary_format=binary_format
-                )
-
-    if version >= (11, 0):
-        # Mac OS 11 on x86_64 is compatible with binaries from previous releases.
-        # Arm64 support was introduced in 11.0, so no Arm binaries from previous
-        # releases exist.
-        #
-        # However, the "universal2" binary format can have a
-        # macOS version earlier than 11.0 when the x86_64 part of the binary supports
-        # that version of macOS.
-        if arch == "x86_64":
-            for minor_version in range(16, 3, -1):
-                compat_version = 10, minor_version
-                binary_formats = _mac_binary_formats(compat_version, arch)
-                for binary_format in binary_formats:
-                    yield "macosx_{major}_{minor}_{binary_format}".format(
-                        major=compat_version[0],
-                        minor=compat_version[1],
-                        binary_format=binary_format,
-                    )
-        else:
-            for minor_version in range(16, 3, -1):
-                compat_version = 10, minor_version
-                binary_format = "universal2"
-                yield "macosx_{major}_{minor}_{binary_format}".format(
-                    major=compat_version[0],
-                    minor=compat_version[1],
-                    binary_format=binary_format,
-                )
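-
-
-# Editor's sketch (illustrative addition, not part of the vendored file): tags
-# for a pinned macOS version and architecture, newest target first. Binary
-# formats below 10.4 are empty for x86_64, so the sequence stops at 10.4.
-def _demo_mac_platforms() -> None:
-    plats = list(mac_platforms(version=(10, 15), arch="x86_64"))
-    assert plats[0] == "macosx_10_15_x86_64"
-    assert "macosx_10_15_universal2" in plats
-    assert plats[-1] == "macosx_10_4_universal"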
-
-
-def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]:
-    linux = _normalize_string(sysconfig.get_platform())
-    if is_32bit:
-        if linux == "linux_x86_64":
-            linux = "linux_i686"
-        elif linux == "linux_aarch64":
-            linux = "linux_armv7l"
-    _, arch = linux.split("_", 1)
-    yield from _manylinux.platform_tags(linux, arch)
-    yield from _musllinux.platform_tags(arch)
-    yield linux
-
-
-def _generic_platforms() -> Iterator[str]:
-    yield _normalize_string(sysconfig.get_platform())
-
-
-def platform_tags() -> Iterator[str]:
-    """
-    Provides the platform tags for this installation.
-    """
-    if platform.system() == "Darwin":
-        return mac_platforms()
-    elif platform.system() == "Linux":
-        return _linux_platforms()
-    else:
-        return _generic_platforms()
-
-
-def interpreter_name() -> str:
-    """
-    Returns the name of the running interpreter.
-    """
-    name = sys.implementation.name
-    return INTERPRETER_SHORT_NAMES.get(name) or name
-
-
-def interpreter_version(*, warn: bool = False) -> str:
-    """
-    Returns the version of the running interpreter.
-    """
-    version = _get_config_var("py_version_nodot", warn=warn)
-    if version:
-        version = str(version)
-    else:
-        version = _version_nodot(sys.version_info[:2])
-    return version
-
-
-def _version_nodot(version: PythonVersion) -> str:
-    return "".join(map(str, version))
-
-
-def sys_tags(*, warn: bool = False) -> Iterator[Tag]:
-    """
-    Returns the sequence of tag triples for the running interpreter.
-
-    The order of the sequence corresponds to priority order for the
-    interpreter, from most to least important.
-    """
-
-    interp_name = interpreter_name()
-    if interp_name == "cp":
-        yield from cpython_tags(warn=warn)
-    else:
-        yield from generic_tags()
-
-    if interp_name == "pp":
-        yield from compatible_tags(interpreter="pp3")
-    else:
-        yield from compatible_tags()
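-
-
-# Editor's sketch (illustrative addition, not part of the vendored file):
-# installers typically walk sys_tags() in order and pick the first wheel
-# whose tag set intersects it.
-def _demo_sys_tags() -> None:
-    wheel_tags = parse_tag("py3-none-any")
-    best = next(tag for tag in sys_tags() if tag in wheel_tags)
-    print("a py3-none-any wheel would install as", best)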
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/utils.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/utils.py
deleted file mode 100644
index bab11b8..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/utils.py
+++ /dev/null
@@ -1,136 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-import re
-from typing import FrozenSet, NewType, Tuple, Union, cast
-
-from .tags import Tag, parse_tag
-from .version import InvalidVersion, Version
-
-BuildTag = Union[Tuple[()], Tuple[int, str]]
-NormalizedName = NewType("NormalizedName", str)
-
-
-class InvalidWheelFilename(ValueError):
-    """
-    An invalid wheel filename was found; users should refer to PEP 427.
-    """
-
-
-class InvalidSdistFilename(ValueError):
-    """
-    An invalid sdist filename was found; users should refer to the packaging user guide.
-    """
-
-
-_canonicalize_regex = re.compile(r"[-_.]+")
-# PEP 427: The build number must start with a digit.
-_build_tag_regex = re.compile(r"(\d+)(.*)")
-
-
-def canonicalize_name(name: str) -> NormalizedName:
-    # This is taken from PEP 503.
-    value = _canonicalize_regex.sub("-", name).lower()
-    return cast(NormalizedName, value)
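-
-
-# Editor's sketch (illustrative addition, not part of the vendored file):
-# PEP 503 normalization collapses runs of ".", "-", and "_" into single
-# dashes and lowercases the result.
-def _demo_canonicalize_name() -> None:
-    assert canonicalize_name("Foo__Bar.baz") == "foo-bar-baz"
-    assert canonicalize_name("Pip") == canonicalize_name("pip")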
-
-
-def canonicalize_version(version: Union[Version, str]) -> str:
-    """
-    This is very similar to Version.__str__, but has one subtle difference
-    in the way it handles the release segment.
-    """
-    if isinstance(version, str):
-        try:
-            parsed = Version(version)
-        except InvalidVersion:
-            # Legacy versions cannot be normalized
-            return version
-    else:
-        parsed = version
-
-    parts = []
-
-    # Epoch
-    if parsed.epoch != 0:
-        parts.append(f"{parsed.epoch}!")
-
-    # Release segment
-    # NB: This strips trailing '.0's to normalize
-    parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in parsed.release)))
-
-    # Pre-release
-    if parsed.pre is not None:
-        parts.append("".join(str(x) for x in parsed.pre))
-
-    # Post-release
-    if parsed.post is not None:
-        parts.append(f".post{parsed.post}")
-
-    # Development release
-    if parsed.dev is not None:
-        parts.append(f".dev{parsed.dev}")
-
-    # Local version segment
-    if parsed.local is not None:
-        parts.append(f"+{parsed.local}")
-
-    return "".join(parts)
-
-
-def parse_wheel_filename(
-    filename: str,
-) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]:
-    if not filename.endswith(".whl"):
-        raise InvalidWheelFilename(
-            f"Invalid wheel filename (extension must be '.whl'): {filename}"
-        )
-
-    filename = filename[:-4]
-    dashes = filename.count("-")
-    if dashes not in (4, 5):
-        raise InvalidWheelFilename(
-            f"Invalid wheel filename (wrong number of parts): {filename}"
-        )
-
-    parts = filename.split("-", dashes - 2)
-    name_part = parts[0]
-    # See PEP 427 for the rules on escaping the project name
-    if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None:
-        raise InvalidWheelFilename(f"Invalid project name: {filename}")
-    name = canonicalize_name(name_part)
-    version = Version(parts[1])
-    if dashes == 5:
-        build_part = parts[2]
-        build_match = _build_tag_regex.match(build_part)
-        if build_match is None:
-            raise InvalidWheelFilename(
-                f"Invalid build number: {build_part} in '{filename}'"
-            )
-        build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2)))
-    else:
-        build = ()
-    tags = parse_tag(parts[-1])
-    return (name, version, build, tags)
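-
-
-# Editor's sketch (illustrative addition, not part of the vendored file):
-# splitting a standard wheel filename into its normalized components.
-def _demo_parse_wheel_filename() -> None:
-    name, version, build, tags = parse_wheel_filename("pip-22.3.1-py3-none-any.whl")
-    assert name == "pip" and str(version) == "22.3.1"
-    assert build == () and Tag("py3", "none", "any") in tags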
-
-
-def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]:
-    if filename.endswith(".tar.gz"):
-        file_stem = filename[: -len(".tar.gz")]
-    elif filename.endswith(".zip"):
-        file_stem = filename[: -len(".zip")]
-    else:
-        raise InvalidSdistFilename(
-            f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):"
-            f" {filename}"
-        )
-
-    # We are requiring a PEP 440 version, which cannot contain dashes,
-    # so we split on the last dash.
-    name_part, sep, version_part = file_stem.rpartition("-")
-    if not sep:
-        raise InvalidSdistFilename(f"Invalid sdist filename: {filename}")
-
-    name = canonicalize_name(name_part)
-    version = Version(version_part)
-    return (name, version)
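-
-
-# Editor's sketch (illustrative addition, not part of the vendored file): the
-# version is everything after the last dash, since PEP 440 versions cannot
-# contain dashes.
-def _demo_parse_sdist_filename() -> None:
-    name, version = parse_sdist_filename("foo-bar-1.0.tar.gz")
-    assert name == "foo-bar" and str(version) == "1.0"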
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/version.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/version.py
deleted file mode 100644
index de9a09a..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/packaging/version.py
+++ /dev/null
@@ -1,504 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-import collections
-import itertools
-import re
-import warnings
-from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union
-
-from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType
-
-__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"]
-
-InfiniteTypes = Union[InfinityType, NegativeInfinityType]
-PrePostDevType = Union[InfiniteTypes, Tuple[str, int]]
-SubLocalType = Union[InfiniteTypes, int, str]
-LocalType = Union[
-    NegativeInfinityType,
-    Tuple[
-        Union[
-            SubLocalType,
-            Tuple[SubLocalType, str],
-            Tuple[NegativeInfinityType, SubLocalType],
-        ],
-        ...,
-    ],
-]
-CmpKey = Tuple[
-    int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType
-]
-LegacyCmpKey = Tuple[int, Tuple[str, ...]]
-VersionComparisonMethod = Callable[
-    [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool
-]
-
-_Version = collections.namedtuple(
-    "_Version", ["epoch", "release", "dev", "pre", "post", "local"]
-)
-
-
-def parse(version: str) -> Union["LegacyVersion", "Version"]:
-    """
-    Parse the given version string and return either a :class:`Version` object
-    or a :class:`LegacyVersion` object depending on if the given version is
-    a valid PEP 440 version or a legacy version.
-    """
-    try:
-        return Version(version)
-    except InvalidVersion:
-        return LegacyVersion(version)
-
-
-class InvalidVersion(ValueError):
-    """
-    An invalid version was found; users should refer to PEP 440.
-    """
-
-
-class _BaseVersion:
-    _key: Union[CmpKey, LegacyCmpKey]
-
-    def __hash__(self) -> int:
-        return hash(self._key)
-
-    # Please keep the duplicated `isinstance` check
-    # in the six comparisons hereunder
-    # unless you find a way to avoid adding overhead function calls.
-    def __lt__(self, other: "_BaseVersion") -> bool:
-        if not isinstance(other, _BaseVersion):
-            return NotImplemented
-
-        return self._key < other._key
-
-    def __le__(self, other: "_BaseVersion") -> bool:
-        if not isinstance(other, _BaseVersion):
-            return NotImplemented
-
-        return self._key <= other._key
-
-    def __eq__(self, other: object) -> bool:
-        if not isinstance(other, _BaseVersion):
-            return NotImplemented
-
-        return self._key == other._key
-
-    def __ge__(self, other: "_BaseVersion") -> bool:
-        if not isinstance(other, _BaseVersion):
-            return NotImplemented
-
-        return self._key >= other._key
-
-    def __gt__(self, other: "_BaseVersion") -> bool:
-        if not isinstance(other, _BaseVersion):
-            return NotImplemented
-
-        return self._key > other._key
-
-    def __ne__(self, other: object) -> bool:
-        if not isinstance(other, _BaseVersion):
-            return NotImplemented
-
-        return self._key != other._key
-
-
-class LegacyVersion(_BaseVersion):
-    def __init__(self, version: str) -> None:
-        self._version = str(version)
-        self._key = _legacy_cmpkey(self._version)
-
-        warnings.warn(
-            "Creating a LegacyVersion has been deprecated and will be "
-            "removed in the next major release",
-            DeprecationWarning,
-        )
-
-    def __str__(self) -> str:
-        return self._version
-
-    def __repr__(self) -> str:
-        return f"<LegacyVersion('{self}')>"
-
-    @property
-    def public(self) -> str:
-        return self._version
-
-    @property
-    def base_version(self) -> str:
-        return self._version
-
-    @property
-    def epoch(self) -> int:
-        return -1
-
-    @property
-    def release(self) -> None:
-        return None
-
-    @property
-    def pre(self) -> None:
-        return None
-
-    @property
-    def post(self) -> None:
-        return None
-
-    @property
-    def dev(self) -> None:
-        return None
-
-    @property
-    def local(self) -> None:
-        return None
-
-    @property
-    def is_prerelease(self) -> bool:
-        return False
-
-    @property
-    def is_postrelease(self) -> bool:
-        return False
-
-    @property
-    def is_devrelease(self) -> bool:
-        return False
-
-
-_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)
-
-_legacy_version_replacement_map = {
-    "pre": "c",
-    "preview": "c",
-    "-": "final-",
-    "rc": "c",
-    "dev": "@",
-}
-
-
-def _parse_version_parts(s: str) -> Iterator[str]:
-    for part in _legacy_version_component_re.split(s):
-        part = _legacy_version_replacement_map.get(part, part)
-
-        if not part or part == ".":
-            continue
-
-        if part[:1] in "0123456789":
-            # pad for numeric comparison
-            yield part.zfill(8)
-        else:
-            yield "*" + part
-
-    # ensure that alpha/beta/candidate are before final
-    yield "*final"
-
-
-def _legacy_cmpkey(version: str) -> LegacyCmpKey:
-
-    # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
-    # greater than or equal to 0. This effectively sorts every LegacyVersion,
-    # which uses the de facto standard originally implemented by setuptools,
-    # before all PEP 440 versions.
-    epoch = -1
-
-    # This scheme is taken from pkg_resources.parse_version in setuptools prior
-    # to its adoption of the packaging library.
-    parts: List[str] = []
-    for part in _parse_version_parts(version.lower()):
-        if part.startswith("*"):
-            # remove "-" before a prerelease tag
-            if part < "*final":
-                while parts and parts[-1] == "*final-":
-                    parts.pop()
-
-            # remove trailing zeros from each series of numeric parts
-            while parts and parts[-1] == "00000000":
-                parts.pop()
-
-        parts.append(part)
-
-    return epoch, tuple(parts)
-
-
-# Deliberately not anchored to the start and end of the string, to make it
-# easier for 3rd party code to reuse
-VERSION_PATTERN = r"""
-    v?
-    (?:
-        (?:(?P<epoch>[0-9]+)!)?                           # epoch
-        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
-        (?P<pre>                                          # pre-release
-            [-_\.]?
-            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
-            [-_\.]?
-            (?P<pre_n>[0-9]+)?
-        )?
-        (?P<post>                                         # post release
-            (?:-(?P<post_n1>[0-9]+))
-            |
-            (?:
-                [-_\.]?
-                (?P<post_l>post|rev|r)
-                [-_\.]?
-                (?P<post_n2>[0-9]+)?
-            )
-        )?
-        (?P<dev>                                          # dev release
-            [-_\.]?
-            (?P<dev_l>dev)
-            [-_\.]?
-            (?P<dev_n>[0-9]+)?
-        )?
-    )
-    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
-"""
-
-
-class Version(_BaseVersion):
-
-    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
-
-    def __init__(self, version: str) -> None:
-
-        # Validate the version and parse it into pieces
-        match = self._regex.search(version)
-        if not match:
-            raise InvalidVersion(f"Invalid version: '{version}'")
-
-        # Store the parsed out pieces of the version
-        self._version = _Version(
-            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
-            release=tuple(int(i) for i in match.group("release").split(".")),
-            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
-            post=_parse_letter_version(
-                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
-            ),
-            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
-            local=_parse_local_version(match.group("local")),
-        )
-
-        # Generate a key which will be used for sorting
-        self._key = _cmpkey(
-            self._version.epoch,
-            self._version.release,
-            self._version.pre,
-            self._version.post,
-            self._version.dev,
-            self._version.local,
-        )
-
-    def __repr__(self) -> str:
-        return f"<Version('{self}')>"
-
-    def __str__(self) -> str:
-        parts = []
-
-        # Epoch
-        if self.epoch != 0:
-            parts.append(f"{self.epoch}!")
-
-        # Release segment
-        parts.append(".".join(str(x) for x in self.release))
-
-        # Pre-release
-        if self.pre is not None:
-            parts.append("".join(str(x) for x in self.pre))
-
-        # Post-release
-        if self.post is not None:
-            parts.append(f".post{self.post}")
-
-        # Development release
-        if self.dev is not None:
-            parts.append(f".dev{self.dev}")
-
-        # Local version segment
-        if self.local is not None:
-            parts.append(f"+{self.local}")
-
-        return "".join(parts)
-
-    @property
-    def epoch(self) -> int:
-        _epoch: int = self._version.epoch
-        return _epoch
-
-    @property
-    def release(self) -> Tuple[int, ...]:
-        _release: Tuple[int, ...] = self._version.release
-        return _release
-
-    @property
-    def pre(self) -> Optional[Tuple[str, int]]:
-        _pre: Optional[Tuple[str, int]] = self._version.pre
-        return _pre
-
-    @property
-    def post(self) -> Optional[int]:
-        return self._version.post[1] if self._version.post else None
-
-    @property
-    def dev(self) -> Optional[int]:
-        return self._version.dev[1] if self._version.dev else None
-
-    @property
-    def local(self) -> Optional[str]:
-        if self._version.local:
-            return ".".join(str(x) for x in self._version.local)
-        else:
-            return None
-
-    @property
-    def public(self) -> str:
-        return str(self).split("+", 1)[0]
-
-    @property
-    def base_version(self) -> str:
-        parts = []
-
-        # Epoch
-        if self.epoch != 0:
-            parts.append(f"{self.epoch}!")
-
-        # Release segment
-        parts.append(".".join(str(x) for x in self.release))
-
-        return "".join(parts)
-
-    @property
-    def is_prerelease(self) -> bool:
-        return self.dev is not None or self.pre is not None
-
-    @property
-    def is_postrelease(self) -> bool:
-        return self.post is not None
-
-    @property
-    def is_devrelease(self) -> bool:
-        return self.dev is not None
-
-    @property
-    def major(self) -> int:
-        return self.release[0] if len(self.release) >= 1 else 0
-
-    @property
-    def minor(self) -> int:
-        return self.release[1] if len(self.release) >= 2 else 0
-
-    @property
-    def micro(self) -> int:
-        return self.release[2] if len(self.release) >= 3 else 0
-
-
-def _parse_letter_version(
-    letter: str, number: Union[str, bytes, SupportsInt]
-) -> Optional[Tuple[str, int]]:
-
-    if letter:
-        # We consider there to be an implicit 0 in a pre-release if there is
-        # not a numeral associated with it.
-        if number is None:
-            number = 0
-
-        # We normalize any letters to their lower case form
-        letter = letter.lower()
-
-        # We consider some words to be alternate spellings of other words and
-        # in those cases we want to normalize the spellings to our preferred
-        # spelling.
-        if letter == "alpha":
-            letter = "a"
-        elif letter == "beta":
-            letter = "b"
-        elif letter in ["c", "pre", "preview"]:
-            letter = "rc"
-        elif letter in ["rev", "r"]:
-            letter = "post"
-
-        return letter, int(number)
-    if not letter and number:
-        # We assume that if we are given a number but not a letter, then this
-        # is using the implicit post release syntax (e.g. 1.0-1)
-        letter = "post"
-
-        return letter, int(number)
-
-    return None
-
-
-_local_version_separators = re.compile(r"[\._-]")
-
-
-def _parse_local_version(local: str) -> Optional[LocalType]:
-    """
-    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
-    """
-    if local is not None:
-        return tuple(
-            part.lower() if not part.isdigit() else int(part)
-            for part in _local_version_separators.split(local)
-        )
-    return None
-
-
-def _cmpkey(
-    epoch: int,
-    release: Tuple[int, ...],
-    pre: Optional[Tuple[str, int]],
-    post: Optional[Tuple[str, int]],
-    dev: Optional[Tuple[str, int]],
-    local: Optional[Tuple[SubLocalType]],
-) -> CmpKey:
-
-    # When we compare a release version, we want to compare it with all of the
-    # trailing zeros removed. So we'll reverse the list, drop all of the now
-    # leading zeros until we come to something non-zero, re-reverse the rest
-    # back into the correct order, and use that tuple as our sorting key.
-    _release = tuple(
-        reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
-    )
-
-    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
-    # We'll do this by abusing the pre segment, but we _only_ want to do this
-    # if there is not a pre or a post segment. If we have one of those then
-    # the normal sorting rules will handle this case correctly.
-    if pre is None and post is None and dev is not None:
-        _pre: PrePostDevType = NegativeInfinity
-    # Versions without a pre-release (except as noted above) should sort after
-    # those with one.
-    elif pre is None:
-        _pre = Infinity
-    else:
-        _pre = pre
-
-    # Versions without a post segment should sort before those with one.
-    if post is None:
-        _post: PrePostDevType = NegativeInfinity
-
-    else:
-        _post = post
-
-    # Versions without a development segment should sort after those with one.
-    if dev is None:
-        _dev: PrePostDevType = Infinity
-
-    else:
-        _dev = dev
-
-    if local is None:
-        # Versions without a local segment should sort before those with one.
-        _local: LocalType = NegativeInfinity
-    else:
-        # Versions with a local segment need that segment parsed to implement
-        # the sorting rules in PEP 440.
-        # - Alphanumeric segments sort before numeric segments
-        # - Alphanumeric segments sort lexicographically
-        # - Numeric segments sort numerically
-        # - Shorter versions sort before longer versions when the prefixes
-        #   match exactly
-        _local = tuple(
-            (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
-        )
-
-    return epoch, _release, _pre, _post, _dev, _local
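-
-
-# Editor's sketch (illustrative addition, not part of the vendored file): the
-# parsing and ordering rules defined above, end to end.
-def _demo_version_ordering() -> None:
-    # parse() falls back to LegacyVersion (with a DeprecationWarning) for
-    # anything that is not valid PEP 440.
-    assert isinstance(parse("1.0.post1"), Version)
-    assert isinstance(parse("not a version"), LegacyVersion)
-    # _cmpkey() strips trailing zeros, so these compare equal ...
-    assert Version("1.0.0") == Version("1.0")
-    # ... and dev releases sort before pre-releases, which sort before finals.
-    assert Version("1.0.dev1") < Version("1.0a1") < Version("1.0") < Version("1.0.post1")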
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__init__.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__init__.py
deleted file mode 100644
index 45f334d..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__init__.py
+++ /dev/null
@@ -1,331 +0,0 @@
-# module pyparsing.py
-#
-# Copyright (c) 2003-2022  Paul T. McGuire
-#
-# Permission is hereby granted, free of charge, to any person obtaining
-# a copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish,
-# distribute, sublicense, and/or sell copies of the Software, and to
-# permit persons to whom the Software is furnished to do so, subject to
-# the following conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-#
-
-__doc__ = """
-pyparsing module - Classes and methods to define and execute parsing grammars
-=============================================================================
-
-The pyparsing module is an alternative approach to creating and
-executing simple grammars, vs. the traditional lex/yacc approach, or the
-use of regular expressions.  With pyparsing, you don't need to learn
-a new syntax for defining grammars or matching expressions - the parsing
-module provides a library of classes that you use to construct the
-grammar directly in Python.
-
-Here is a program to parse "Hello, World!" (or any greeting of the form
-``"<salutation>, <addressee>!"``), built up using :class:`Word`,
-:class:`Literal`, and :class:`And` elements
-(the :meth:`'+'<ParserElement.__add__>` operators create :class:`And` expressions,
-and the strings are auto-converted to :class:`Literal` expressions)::
-
-    from pyparsing import Word, alphas
-
-    # define grammar of a greeting
-    greet = Word(alphas) + "," + Word(alphas) + "!"
-
-    hello = "Hello, World!"
-    print(hello, "->", greet.parse_string(hello))
-
-The program outputs the following::
-
-    Hello, World! -> ['Hello', ',', 'World', '!']
-
-The Python representation of the grammar is quite readable, owing to the
-self-explanatory class names, and the use of :class:`'+'<And>`,
-:class:`'|'<MatchFirst>`, :class:`'^'<Or>` and :class:`'&'<Each>` operators.
-
-The :class:`ParseResults` object returned from
-:class:`ParserElement.parseString` can be
-accessed as a nested list, a dictionary, or an object with named
-attributes.
-
-The pyparsing module handles some of the problems that are typically
-vexing when writing text parsers:
-
-  - extra or missing whitespace (the above program will also handle
-    "Hello,World!", "Hello  ,  World  !", etc.)
-  - quoted strings
-  - embedded comments
-
-
-Getting Started -
------------------
-Visit the classes :class:`ParserElement` and :class:`ParseResults` to
-see the base classes that most other pyparsing
-classes inherit from. Use the docstrings for examples of how to:
-
- - construct literal match expressions from :class:`Literal` and
-   :class:`CaselessLiteral` classes
- - construct character word-group expressions using the :class:`Word`
-   class
- - see how to create repetitive expressions using :class:`ZeroOrMore`
-   and :class:`OneOrMore` classes
- - use :class:`'+'<And>`, :class:`'|'<MatchFirst>`, :class:`'^'<Or>`,
-   and :class:`'&'<Each>` operators to combine simple expressions into
-   more complex ones
- - associate names with your parsed results using
-   :class:`ParserElement.setResultsName`
- - access the parsed data, which is returned as a :class:`ParseResults`
-   object
- - find some helpful expression short-cuts like :class:`delimitedList`
-   and :class:`oneOf`
- - find more useful common expressions in the :class:`pyparsing_common`
-   namespace class
-"""
-from typing import NamedTuple
-
-
-class version_info(NamedTuple):
-    major: int
-    minor: int
-    micro: int
-    releaselevel: str
-    serial: int
-
-    @property
-    def __version__(self):
-        return (
-            "{}.{}.{}".format(self.major, self.minor, self.micro)
-            + (
-                "{}{}{}".format(
-                    "r" if self.releaselevel[0] == "c" else "",
-                    self.releaselevel[0],
-                    self.serial,
-                ),
-                "",
-            )[self.releaselevel == "final"]
-        )
-
-    def __str__(self):
-        return "{} {} / {}".format(__name__, self.__version__, __version_time__)
-
-    def __repr__(self):
-        return "{}.{}({})".format(
-            __name__,
-            type(self).__name__,
-            ", ".join("{}={!r}".format(*nv) for nv in zip(self._fields, self)),
-        )
-
-
-__version_info__ = version_info(3, 0, 8, "final", 0)
-__version_time__ = "09 Apr 2022 23:29 UTC"
-__version__ = __version_info__.__version__
-__versionTime__ = __version_time__
-__author__ = "Paul McGuire <ptmcg.gm+pyparsing@gmail.com>"
-
-from .util import *
-from .exceptions import *
-from .actions import *
-from .core import __diag__, __compat__
-from .results import *
-from .core import *
-from .core import _builtin_exprs as core_builtin_exprs
-from .helpers import *
-from .helpers import _builtin_exprs as helper_builtin_exprs
-
-from .unicode import unicode_set, UnicodeRangeList, pyparsing_unicode as unicode
-from .testing import pyparsing_test as testing
-from .common import (
-    pyparsing_common as common,
-    _builtin_exprs as common_builtin_exprs,
-)
-
-# define backward compat synonyms
-if "pyparsing_unicode" not in globals():
-    pyparsing_unicode = unicode
-if "pyparsing_common" not in globals():
-    pyparsing_common = common
-if "pyparsing_test" not in globals():
-    pyparsing_test = testing
-
-core_builtin_exprs += common_builtin_exprs + helper_builtin_exprs
-
-
-__all__ = [
-    "__version__",
-    "__version_time__",
-    "__author__",
-    "__compat__",
-    "__diag__",
-    "And",
-    "AtLineStart",
-    "AtStringStart",
-    "CaselessKeyword",
-    "CaselessLiteral",
-    "CharsNotIn",
-    "Combine",
-    "Dict",
-    "Each",
-    "Empty",
-    "FollowedBy",
-    "Forward",
-    "GoToColumn",
-    "Group",
-    "IndentedBlock",
-    "Keyword",
-    "LineEnd",
-    "LineStart",
-    "Literal",
-    "Located",
-    "PrecededBy",
-    "MatchFirst",
-    "NoMatch",
-    "NotAny",
-    "OneOrMore",
-    "OnlyOnce",
-    "OpAssoc",
-    "Opt",
-    "Optional",
-    "Or",
-    "ParseBaseException",
-    "ParseElementEnhance",
-    "ParseException",
-    "ParseExpression",
-    "ParseFatalException",
-    "ParseResults",
-    "ParseSyntaxException",
-    "ParserElement",
-    "PositionToken",
-    "QuotedString",
-    "RecursiveGrammarException",
-    "Regex",
-    "SkipTo",
-    "StringEnd",
-    "StringStart",
-    "Suppress",
-    "Token",
-    "TokenConverter",
-    "White",
-    "Word",
-    "WordEnd",
-    "WordStart",
-    "ZeroOrMore",
-    "Char",
-    "alphanums",
-    "alphas",
-    "alphas8bit",
-    "any_close_tag",
-    "any_open_tag",
-    "c_style_comment",
-    "col",
-    "common_html_entity",
-    "counted_array",
-    "cpp_style_comment",
-    "dbl_quoted_string",
-    "dbl_slash_comment",
-    "delimited_list",
-    "dict_of",
-    "empty",
-    "hexnums",
-    "html_comment",
-    "identchars",
-    "identbodychars",
-    "java_style_comment",
-    "line",
-    "line_end",
-    "line_start",
-    "lineno",
-    "make_html_tags",
-    "make_xml_tags",
-    "match_only_at_col",
-    "match_previous_expr",
-    "match_previous_literal",
-    "nested_expr",
-    "null_debug_action",
-    "nums",
-    "one_of",
-    "printables",
-    "punc8bit",
-    "python_style_comment",
-    "quoted_string",
-    "remove_quotes",
-    "replace_with",
-    "replace_html_entity",
-    "rest_of_line",
-    "sgl_quoted_string",
-    "srange",
-    "string_end",
-    "string_start",
-    "trace_parse_action",
-    "unicode_string",
-    "with_attribute",
-    "indentedBlock",
-    "original_text_for",
-    "ungroup",
-    "infix_notation",
-    "locatedExpr",
-    "with_class",
-    "CloseMatch",
-    "token_map",
-    "pyparsing_common",
-    "pyparsing_unicode",
-    "unicode_set",
-    "condition_as_parse_action",
-    "pyparsing_test",
-    # pre-PEP8 compatibility names
-    "__versionTime__",
-    "anyCloseTag",
-    "anyOpenTag",
-    "cStyleComment",
-    "commonHTMLEntity",
-    "countedArray",
-    "cppStyleComment",
-    "dblQuotedString",
-    "dblSlashComment",
-    "delimitedList",
-    "dictOf",
-    "htmlComment",
-    "javaStyleComment",
-    "lineEnd",
-    "lineStart",
-    "makeHTMLTags",
-    "makeXMLTags",
-    "matchOnlyAtCol",
-    "matchPreviousExpr",
-    "matchPreviousLiteral",
-    "nestedExpr",
-    "nullDebugAction",
-    "oneOf",
-    "opAssoc",
-    "pythonStyleComment",
-    "quotedString",
-    "removeQuotes",
-    "replaceHTMLEntity",
-    "replaceWith",
-    "restOfLine",
-    "sglQuotedString",
-    "stringEnd",
-    "stringStart",
-    "traceParseAction",
-    "unicodeString",
-    "withAttribute",
-    "indentedBlock",
-    "originalTextFor",
-    "infixNotation",
-    "locatedExpr",
-    "withClass",
-    "tokenMap",
-    "conditionAsParseAction",
-    "autoname_elements",
-]
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/__init__.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/__init__.cpython-310.pyc
deleted file mode 100644
index 9b3002c..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/__init__.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/actions.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/actions.cpython-310.pyc
deleted file mode 100644
index 76778d4..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/actions.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/common.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/common.cpython-310.pyc
deleted file mode 100644
index 187b628..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/common.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/core.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/core.cpython-310.pyc
deleted file mode 100644
index a87cae6..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/core.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/exceptions.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/exceptions.cpython-310.pyc
deleted file mode 100644
index 8ef1a01..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/exceptions.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/helpers.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/helpers.cpython-310.pyc
deleted file mode 100644
index 37030c9..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/helpers.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/results.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/results.cpython-310.pyc
deleted file mode 100644
index 15389a8..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/results.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/testing.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/testing.cpython-310.pyc
deleted file mode 100644
index 76a6a42..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/testing.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/unicode.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/unicode.cpython-310.pyc
deleted file mode 100644
index cbedf4f..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/unicode.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/util.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/util.cpython-310.pyc
deleted file mode 100644
index 25d3c2e..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/__pycache__/util.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/actions.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/actions.py
deleted file mode 100644
index 2bcc550..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/actions.py
+++ /dev/null
@@ -1,207 +0,0 @@
-# actions.py
-
-from .exceptions import ParseException
-from .util import col
-
-
-class OnlyOnce:
-    """
-    Wrapper for parse actions, to ensure they are only called once.
-    """
-
-    def __init__(self, method_call):
-        from .core import _trim_arity
-
-        self.callable = _trim_arity(method_call)
-        self.called = False
-
-    def __call__(self, s, l, t):
-        if not self.called:
-            results = self.callable(s, l, t)
-            self.called = True
-            return results
-        raise ParseException(s, l, "OnlyOnce obj called multiple times w/out reset")
-
-    def reset(self):
-        """
-        Allow the associated parse action to be called once more.
-        """
-
-        self.called = False
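-
-
-# Editor's sketch (illustrative addition, not part of the vendored file): a
-# second call raises until reset() re-arms the wrapped action.
-def _demo_only_once():
-    action = OnlyOnce(lambda s, l, t: t)
-    action("x", 0, ["tok"])  # first call succeeds
-    try:
-        action("x", 0, ["tok"])  # second call raises
-    except ParseException:
-        pass
-    action.reset()
-    action("x", 0, ["tok"])  # allowed once more after reset()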
-
-
-def match_only_at_col(n):
-    """
-    Helper method for defining parse actions that require matching at
-    a specific column in the input text.
-    """
-
-    def verify_col(strg, locn, toks):
-        if col(locn, strg) != n:
-            raise ParseException(strg, locn, "matched token not at column {}".format(n))
-
-    return verify_col
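-
-
-# Editor's sketch (illustrative addition, not part of the vendored file): the
-# returned action rejects matches that do not start at the requested column.
-def _demo_match_only_at_col():
-    at_col_1 = match_only_at_col(1)
-    at_col_1("data", 0, ["data"])  # location 0 is column 1: passes silently
-    try:
-        at_col_1("  data", 2, ["data"])  # location 2 is column 3: raises
-    except ParseException:
-        pass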
-
-
-def replace_with(repl_str):
-    """
-    Helper method for common parse actions that simply return
-    a literal value.  Especially useful when used with
-    :class:`transform_string<ParserElement.transform_string>` ().
-
-    Example::
-
-        num = Word(nums).set_parse_action(lambda toks: int(toks[0]))
-        na = one_of("N/A NA").set_parse_action(replace_with(math.nan))
-        term = na | num
-
-        OneOrMore(term).parse_string("324 234 N/A 234") # -> [324, 234, nan, 234]
-    """
-    return lambda s, l, t: [repl_str]
-
-
-def remove_quotes(s, l, t):
-    """
-    Helper parse action for removing quotation marks from parsed
-    quoted strings.
-
-    Example::
-
-        # by default, quotation marks are included in parsed results
-        quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
-
-        # use remove_quotes to strip quotation marks from parsed results
-        quoted_string.set_parse_action(remove_quotes)
-        quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
-    """
-    return t[0][1:-1]
-
-
-def with_attribute(*args, **attr_dict):
-    """
-    Helper to create a validating parse action to be used with start
-    tags created with :class:`make_xml_tags` or
-    :class:`make_html_tags`. Use ``with_attribute`` to qualify
-    a starting tag with a required attribute value, to avoid false
-    matches on common tags such as ``<TD>`` or ``<DIV>``.
-
-    Call ``with_attribute`` with a series of attribute names and
-    values. Specify the list of filter attributes names and values as:
-
-    - keyword arguments, as in ``(align="right")``, or
-    - as an explicit dict with ``**`` operator, when an attribute
-      name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}``, or
-    - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))``
-
-    For attribute names with a namespace prefix, you must use the second
-    form.  Attribute names are matched insensitive to upper/lower case.
-
-    If just testing for ``class`` (with or without a namespace), use
-    :class:`with_class`.
-
-    To verify that the attribute exists, but without specifying a value,
-    pass ``with_attribute.ANY_VALUE`` as the value.
-
-    Example::
-
-        html = '''
-            <div>
-            Some text
-            <div type="grid">1 4 0 1 0</div>
-            <div type="graph">1,3 2,3 1,1</div>
-            <div>this has no type</div>
-            </div>
-
-        '''
-        div,div_end = make_html_tags("div")
-
-        # only match div tag having a type attribute with value "grid"
-        div_grid = div().set_parse_action(with_attribute(type="grid"))
-        grid_expr = div_grid + SkipTo(div | div_end)("body")
-        for grid_header in grid_expr.search_string(html):
-            print(grid_header.body)
-
-        # construct a match with any div tag having a type attribute, regardless of the value
-        div_any_type = div().set_parse_action(with_attribute(type=with_attribute.ANY_VALUE))
-        div_expr = div_any_type + SkipTo(div | div_end)("body")
-        for div_header in div_expr.search_string(html):
-            print(div_header.body)
-
-    prints::
-
-        1 4 0 1 0
-
-        1 4 0 1 0
-        1,3 2,3 1,1
-    """
-    if args:
-        attrs = args[:]
-    else:
-        attrs = attr_dict.items()
-    attrs = [(k, v) for k, v in attrs]
-
-    def pa(s, l, tokens):
-        for attrName, attrValue in attrs:
-            if attrName not in tokens:
-                raise ParseException(s, l, "no matching attribute " + attrName)
-            if attrValue != with_attribute.ANY_VALUE and tokens[attrName] != attrValue:
-                raise ParseException(
-                    s,
-                    l,
-                    "attribute {!r} has value {!r}, must be {!r}".format(
-                        attrName, tokens[attrName], attrValue
-                    ),
-                )
-
-    return pa
-
-
-with_attribute.ANY_VALUE = object()
-
-
-def with_class(classname, namespace=""):
-    """
-    Simplified version of :class:`with_attribute` when
-    matching on a div class - made difficult because ``class`` is
-    a reserved word in Python.
-
-    Example::
-
-        html = '''
-            <div>
-            Some text
-            <div class="grid">1 4 0 1 0</div>
-            <div class="graph">1,3 2,3 1,1</div>
-            <div>this &lt;div&gt; has no class</div>
-            </div>
-
-        '''
-        div, div_end = make_html_tags("div")
-        div_grid = div().set_parse_action(with_class("grid"))
-
-        grid_expr = div_grid + SkipTo(div | div_end)("body")
-        for grid_header in grid_expr.search_string(html):
-            print(grid_header.body)
-
-        div_any_type = div().set_parse_action(with_class(with_attribute.ANY_VALUE))
-        div_expr = div_any_type + SkipTo(div | div_end)("body")
-        for div_header in div_expr.search_string(html):
-            print(div_header.body)
-
-    prints::
-
-        1 4 0 1 0
-
-        1 4 0 1 0
-        1,3 2,3 1,1
-    """
-    classattr = "{}:class".format(namespace) if namespace else "class"
-    return with_attribute(**{classattr: classname})
-
-
-# pre-PEP8 compatibility symbols
-replaceWith = replace_with
-removeQuotes = remove_quotes
-withAttribute = with_attribute
-withClass = with_class
-matchOnlyAtCol = match_only_at_col
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/common.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/common.py
deleted file mode 100644
index 1859fb7..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/common.py
+++ /dev/null
@@ -1,424 +0,0 @@
-# common.py
-from .core import *
-from .helpers import delimited_list, any_open_tag, any_close_tag
-from datetime import datetime
-
-
-# some other useful expressions - using lower-case class name since we are really using this as a namespace
-class pyparsing_common:
-    """Here are some common low-level expressions that may be useful in
-    jump-starting parser development:
-
-    - numeric forms (:class:`integers<integer>`, :class:`reals<real>`,
-      :class:`scientific notation<sci_real>`)
-    - common :class:`programming identifiers<identifier>`
-    - network addresses (:class:`MAC<mac_address>`,
-      :class:`IPv4<ipv4_address>`, :class:`IPv6<ipv6_address>`)
-    - ISO8601 :class:`dates<iso8601_date>` and
-      :class:`datetime<iso8601_datetime>`
-    - :class:`UUID<uuid>`
-    - :class:`comma-separated list<comma_separated_list>`
-    - :class:`url`
-
-    Parse actions:
-
-    - :class:`convert_to_integer`
-    - :class:`convert_to_float`
-    - :class:`convert_to_date`
-    - :class:`convert_to_datetime`
-    - :class:`strip_html_tags`
-    - :class:`upcase_tokens`
-    - :class:`downcase_tokens`
-
-    Example::
-
-        pyparsing_common.number.run_tests('''
-            # any int or real number, returned as the appropriate type
-            100
-            -100
-            +100
-            3.14159
-            6.02e23
-            1e-12
-            ''')
-
-        pyparsing_common.fnumber.run_tests('''
-            # any int or real number, returned as float
-            100
-            -100
-            +100
-            3.14159
-            6.02e23
-            1e-12
-            ''')
-
-        pyparsing_common.hex_integer.run_tests('''
-            # hex numbers
-            100
-            FF
-            ''')
-
-        pyparsing_common.fraction.run_tests('''
-            # fractions
-            1/2
-            -3/4
-            ''')
-
-        pyparsing_common.mixed_integer.run_tests('''
-            # mixed fractions
-            1
-            1/2
-            -3/4
-            1-3/4
-            ''')
-
-        import uuid
-        pyparsing_common.uuid.set_parse_action(token_map(uuid.UUID))
-        pyparsing_common.uuid.run_tests('''
-            # uuid
-            12345678-1234-5678-1234-567812345678
-            ''')
-
-    prints::
-
-        # any int or real number, returned as the appropriate type
-        100
-        [100]
-
-        -100
-        [-100]
-
-        +100
-        [100]
-
-        3.14159
-        [3.14159]
-
-        6.02e23
-        [6.02e+23]
-
-        1e-12
-        [1e-12]
-
-        # any int or real number, returned as float
-        100
-        [100.0]
-
-        -100
-        [-100.0]
-
-        +100
-        [100.0]
-
-        3.14159
-        [3.14159]
-
-        6.02e23
-        [6.02e+23]
-
-        1e-12
-        [1e-12]
-
-        # hex numbers
-        100
-        [256]
-
-        FF
-        [255]
-
-        # fractions
-        1/2
-        [0.5]
-
-        -3/4
-        [-0.75]
-
-        # mixed fractions
-        1
-        [1]
-
-        1/2
-        [0.5]
-
-        -3/4
-        [-0.75]
-
-        1-3/4
-        [1.75]
-
-        # uuid
-        12345678-1234-5678-1234-567812345678
-        [UUID('12345678-1234-5678-1234-567812345678')]
-    """
-
-    convert_to_integer = token_map(int)
-    """
-    Parse action for converting parsed integers to Python int
-    """
-
-    convert_to_float = token_map(float)
-    """
-    Parse action for converting parsed numbers to Python float
-    """
-
-    integer = Word(nums).set_name("integer").set_parse_action(convert_to_integer)
-    """expression that parses an unsigned integer, returns an int"""
-
-    hex_integer = (
-        Word(hexnums).set_name("hex integer").set_parse_action(token_map(int, 16))
-    )
-    """expression that parses a hexadecimal integer, returns an int"""
-
-    signed_integer = (
-        Regex(r"[+-]?\d+")
-        .set_name("signed integer")
-        .set_parse_action(convert_to_integer)
-    )
-    """expression that parses an integer with optional leading sign, returns an int"""
-
-    fraction = (
-        signed_integer().set_parse_action(convert_to_float)
-        + "/"
-        + signed_integer().set_parse_action(convert_to_float)
-    ).set_name("fraction")
-    """fractional expression of an integer divided by an integer, returns a float"""
-    fraction.add_parse_action(lambda tt: tt[0] / tt[-1])
-
-    mixed_integer = (
-        fraction | signed_integer + Opt(Opt("-").suppress() + fraction)
-    ).set_name("fraction or mixed integer-fraction")
-    """mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
-    mixed_integer.add_parse_action(sum)
-
-    real = (
-        Regex(r"[+-]?(?:\d+\.\d*|\.\d+)")
-        .set_name("real number")
-        .set_parse_action(convert_to_float)
-    )
-    """expression that parses a floating point number and returns a float"""
-
-    sci_real = (
-        Regex(r"[+-]?(?:\d+(?:[eE][+-]?\d+)|(?:\d+\.\d*|\.\d+)(?:[eE][+-]?\d+)?)")
-        .set_name("real number with scientific notation")
-        .set_parse_action(convert_to_float)
-    )
-    """expression that parses a floating point number with optional
-    scientific notation and returns a float"""
-
-    # streamlining this expression makes the docs nicer-looking
-    number = (sci_real | real | signed_integer).set_name("number").streamline()
-    """any numeric expression, returns the corresponding Python type"""
-
-    fnumber = (
-        Regex(r"[+-]?\d+\.?\d*([eE][+-]?\d+)?")
-        .set_name("fnumber")
-        .set_parse_action(convert_to_float)
-    )
-    """any int or real number, returned as float"""
-
-    identifier = Word(identchars, identbodychars).set_name("identifier")
-    """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
-
-    ipv4_address = Regex(
-        r"(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}"
-    ).set_name("IPv4 address")
-    "IPv4 address (``0.0.0.0 - 255.255.255.255``)"
-
-    _ipv6_part = Regex(r"[0-9a-fA-F]{1,4}").set_name("hex_integer")
-    _full_ipv6_address = (_ipv6_part + (":" + _ipv6_part) * 7).set_name(
-        "full IPv6 address"
-    )
-    _short_ipv6_address = (
-        Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6))
-        + "::"
-        + Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6))
-    ).set_name("short IPv6 address")
-    _short_ipv6_address.add_condition(
-        lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8
-    )
-    _mixed_ipv6_address = ("::ffff:" + ipv4_address).set_name("mixed IPv6 address")
-    ipv6_address = Combine(
-        (_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).set_name(
-            "IPv6 address"
-        )
-    ).set_name("IPv6 address")
-    "IPv6 address (long, short, or mixed form)"
-
-    mac_address = Regex(
-        r"[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}"
-    ).set_name("MAC address")
-    "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
-
-    @staticmethod
-    def convert_to_date(fmt: str = "%Y-%m-%d"):
-        """
-        Helper to create a parse action for converting parsed date string to Python datetime.date
-
-        Params -
-        - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``)
-
-        Example::
-
-            date_expr = pyparsing_common.iso8601_date.copy()
-            date_expr.set_parse_action(pyparsing_common.convert_to_date())
-            print(date_expr.parse_string("1999-12-31"))
-
-        prints::
-
-            [datetime.date(1999, 12, 31)]
-        """
-
-        def cvt_fn(ss, ll, tt):
-            try:
-                return datetime.strptime(tt[0], fmt).date()
-            except ValueError as ve:
-                raise ParseException(ss, ll, str(ve))
-
-        return cvt_fn
-
-    @staticmethod
-    def convert_to_datetime(fmt: str = "%Y-%m-%dT%H:%M:%S.%f"):
-        """Helper to create a parse action for converting parsed
-        datetime string to Python datetime.datetime
-
-        Params -
-        - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``)
-
-        Example::
-
-            dt_expr = pyparsing_common.iso8601_datetime.copy()
-            dt_expr.set_parse_action(pyparsing_common.convert_to_datetime())
-            print(dt_expr.parse_string("1999-12-31T23:59:59.999"))
-
-        prints::
-
-            [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
-        """
-
-        def cvt_fn(s, l, t):
-            try:
-                return datetime.strptime(t[0], fmt)
-            except ValueError as ve:
-                raise ParseException(s, l, str(ve))
-
-        return cvt_fn
-
-    iso8601_date = Regex(
-        r"(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?"
-    ).set_name("ISO8601 date")
-    "ISO8601 date (``yyyy-mm-dd``)"
-
-    iso8601_datetime = Regex(
-        r"(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?"
-    ).set_name("ISO8601 datetime")
-    "ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``"
-
-    uuid = Regex(r"[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}").set_name("UUID")
-    "UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)"
-
-    _html_stripper = any_open_tag.suppress() | any_close_tag.suppress()
-
-    @staticmethod
-    def strip_html_tags(s: str, l: int, tokens: ParseResults):
-        """Parse action to remove HTML tags from web page HTML source
-
-        Example::
-
-            # strip HTML links from normal text
-            text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
-            td, td_end = make_html_tags("TD")
-            table_text = td + SkipTo(td_end).set_parse_action(pyparsing_common.strip_html_tags)("body") + td_end
-            print(table_text.parse_string(text).body)
-
-        Prints::
-
-            More info at the pyparsing wiki page
-        """
-        return pyparsing_common._html_stripper.transform_string(tokens[0])
-
-    _commasepitem = (
-        Combine(
-            OneOrMore(
-                ~Literal(",")
-                + ~LineEnd()
-                + Word(printables, exclude_chars=",")
-                + Opt(White(" \t") + ~FollowedBy(LineEnd() | ","))
-            )
-        )
-        .streamline()
-        .set_name("commaItem")
-    )
-    comma_separated_list = delimited_list(
-        Opt(quoted_string.copy() | _commasepitem, default="")
-    ).set_name("comma separated list")
-    """Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
-
-    upcase_tokens = staticmethod(token_map(lambda t: t.upper()))
-    """Parse action to convert tokens to upper case."""
-
-    downcase_tokens = staticmethod(token_map(lambda t: t.lower()))
-    """Parse action to convert tokens to lower case."""
-
-    # fmt: off
-    url = Regex(
-        # https://mathiasbynens.be/demo/url-regex
-        # https://gist.github.com/dperini/729294
-        r"^" +
-        # protocol identifier (optional)
-        # short syntax // still required
-        r"(?:(?:(?P<scheme>https?|ftp):)?\/\/)" +
-        # user:pass BasicAuth (optional)
-        r"(?:(?P<auth>\S+(?::\S*)?)@)?" +
-        r"(?P<host>" +
-        # IP address exclusion
-        # private & local networks
-        r"(?!(?:10|127)(?:\.\d{1,3}){3})" +
-        r"(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})" +
-        r"(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})" +
-        # IP address dotted notation octets
-        # excludes loopback network 0.0.0.0
-        # excludes reserved space >= 224.0.0.0
-        # excludes network & broadcast addresses
-        # (first & last IP address of each class)
-        r"(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])" +
-        r"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}" +
-        r"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))" +
-        r"|" +
-        # host & domain names, may end with dot
-        # can be replaced by a shortest alternative
-        # (?![-_])(?:[-\w\u00a1-\uffff]{0,63}[^-_]\.)+
-        r"(?:" +
-        r"(?:" +
-        r"[a-z0-9\u00a1-\uffff]" +
-        r"[a-z0-9\u00a1-\uffff_-]{0,62}" +
-        r")?" +
-        r"[a-z0-9\u00a1-\uffff]\." +
-        r")+" +
-        # TLD identifier name, may end with dot
-        r"(?:[a-z\u00a1-\uffff]{2,}\.?)" +
-        r")" +
-        # port number (optional)
-        r"(:(?P<port>\d{2,5}))?" +
-        # resource path (optional)
-        r"(?P<path>\/[^?# ]*)?" +
-        # query string (optional)
-        r"(\?(?P<query>[^#]*))?" +
-        # fragment (optional)
-        r"(#(?P<fragment>\S*))?" +
-        r"$"
-    ).set_name("url")
-    # fmt: on
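-
-    # Usage sketch (illustrative): the named regex groups become results
-    # names on a successful match.
-    #
-    #   result = pyparsing_common.url.parse_string("https://example.com:8080/a/b?x=1#top")
-    #   # result.scheme -> 'https', result.host -> 'example.com',
-    #   # result.port -> '8080', result.path -> '/a/b',
-    #   # result.query -> 'x=1', result.fragment -> 'top'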
-
-    # pre-PEP8 compatibility names
-    convertToInteger = convert_to_integer
-    convertToFloat = convert_to_float
-    convertToDate = convert_to_date
-    convertToDatetime = convert_to_datetime
-    stripHTMLTags = strip_html_tags
-    upcaseTokens = upcase_tokens
-    downcaseTokens = downcase_tokens
-
-
-_builtin_exprs = [
-    v for v in vars(pyparsing_common).values() if isinstance(v, ParserElement)
-]
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/core.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/core.py
deleted file mode 100644
index 454bd57..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/core.py
+++ /dev/null
@@ -1,5812 +0,0 @@
-#
-# core.py
-#
-import os
-from typing import (
-    Optional as OptionalType,
-    Iterable as IterableType,
-    NamedTuple,
-    Union,
-    Callable,
-    Any,
-    Generator,
-    Tuple,
-    List,
-    TextIO,
-    Set,
-    Dict as DictType,
-    Sequence,
-)
-from abc import ABC, abstractmethod
-from enum import Enum
-import string
-import copy
-import warnings
-import re
-import sys
-from collections.abc import Iterable
-import traceback
-import types
-from operator import itemgetter
-from functools import wraps
-from threading import RLock
-from pathlib import Path
-
-from .util import (
-    _FifoCache,
-    _UnboundedCache,
-    __config_flags,
-    _collapse_string_to_ranges,
-    _escape_regex_range_chars,
-    _bslash,
-    _flatten,
-    LRUMemo as _LRUMemo,
-    UnboundedMemo as _UnboundedMemo,
-)
-from .exceptions import *
-from .actions import *
-from .results import ParseResults, _ParseResultsWithOffset
-from .unicode import pyparsing_unicode
-
-_MAX_INT = sys.maxsize
-str_type: Tuple[type, ...] = (str, bytes)
-
-#
-# Copyright (c) 2003-2022  Paul T. McGuire
-#
-# Permission is hereby granted, free of charge, to any person obtaining
-# a copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish,
-# distribute, sublicense, and/or sell copies of the Software, and to
-# permit persons to whom the Software is furnished to do so, subject to
-# the following conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-#
-
-
-if sys.version_info >= (3, 8):
-    from functools import cached_property
-else:
-
-    class cached_property:
-        def __init__(self, func):
-            self._func = func
-
-        def __get__(self, instance, owner=None):
-            ret = instance.__dict__[self._func.__name__] = self._func(instance)
-            return ret
-
-
-class __compat__(__config_flags):
-    """
-    A cross-version compatibility configuration for pyparsing features that will be
-    released in a future version. By setting values in this configuration to True,
-    those features can be enabled in prior versions for compatibility development
-    and testing.
-
-    - ``collect_all_And_tokens`` - flag to enable fix for Issue #63 that fixes erroneous grouping
-      of results names when an :class:`And` expression is nested within an :class:`Or` or :class:`MatchFirst`;
-      maintained for compatibility, but setting to ``False`` no longer restores pre-2.3.1
-      behavior
-    """
-
-    _type_desc = "compatibility"
-
-    collect_all_And_tokens = True
-
-    _all_names = [__ for __ in locals() if not __.startswith("_")]
-    _fixed_names = """
-        collect_all_And_tokens
-        """.split()
-
-
-class __diag__(__config_flags):
-    _type_desc = "diagnostic"
-
-    warn_multiple_tokens_in_named_alternation = False
-    warn_ungrouped_named_tokens_in_collection = False
-    warn_name_set_on_empty_Forward = False
-    warn_on_parse_using_empty_Forward = False
-    warn_on_assignment_to_Forward = False
-    warn_on_multiple_string_args_to_oneof = False
-    warn_on_match_first_with_lshift_operator = False
-    enable_debug_on_named_expressions = False
-
-    _all_names = [__ for __ in locals() if not __.startswith("_")]
-    _warning_names = [name for name in _all_names if name.startswith("warn")]
-    _debug_names = [name for name in _all_names if name.startswith("enable_debug")]
-
-    @classmethod
-    def enable_all_warnings(cls) -> None:
-        for name in cls._warning_names:
-            cls.enable(name)
-
-
-class Diagnostics(Enum):
-    """
-    Diagnostic configuration (all default to disabled)
-    - ``warn_multiple_tokens_in_named_alternation`` - flag to enable warnings when a results
-      name is defined on a :class:`MatchFirst` or :class:`Or` expression with one or more :class:`And` subexpressions
-    - ``warn_ungrouped_named_tokens_in_collection`` - flag to enable warnings when a results
-      name is defined on a containing expression with ungrouped subexpressions that also
-      have results names
-    - ``warn_name_set_on_empty_Forward`` - flag to enable warnings when a :class:`Forward` is defined
-      with a results name, but has no contents defined
-    - ``warn_on_parse_using_empty_Forward`` - flag to enable warnings when a :class:`Forward` is
-      defined in a grammar but has never had an expression attached to it
-    - ``warn_on_assignment_to_Forward`` - flag to enable warnings when a :class:`Forward` is defined
-      but is overwritten by assigning using ``'='`` instead of ``'<<='`` or ``'<<'``
-    - ``warn_on_multiple_string_args_to_oneof`` - flag to enable warnings when :class:`one_of` is
-      incorrectly called with multiple str arguments
-    - ``enable_debug_on_named_expressions`` - flag to auto-enable debug on all subsequent
-      calls to :class:`ParserElement.set_name`
-
-    Diagnostics are enabled/disabled by calling :class:`enable_diag` and :class:`disable_diag`.
-    All warnings can be enabled by calling :class:`enable_all_warnings`.
-    """
-
-    warn_multiple_tokens_in_named_alternation = 0
-    warn_ungrouped_named_tokens_in_collection = 1
-    warn_name_set_on_empty_Forward = 2
-    warn_on_parse_using_empty_Forward = 3
-    warn_on_assignment_to_Forward = 4
-    warn_on_multiple_string_args_to_oneof = 5
-    warn_on_match_first_with_lshift_operator = 6
-    enable_debug_on_named_expressions = 7
-
-
-def enable_diag(diag_enum: Diagnostics) -> None:
-    """
-    Enable a global pyparsing diagnostic flag (see :class:`Diagnostics`).
-    """
-    __diag__.enable(diag_enum.name)
-
-
-def disable_diag(diag_enum: Diagnostics) -> None:
-    """
-    Disable a global pyparsing diagnostic flag (see :class:`Diagnostics`).
-    """
-    __diag__.disable(diag_enum.name)
-
-
-def enable_all_warnings() -> None:
-    """
-    Enable all global pyparsing diagnostic warnings (see :class:`Diagnostics`).
-    """
-    __diag__.enable_all_warnings()
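-
-
-# Usage sketch (illustrative): diagnostics are toggled through the
-# module-level helpers defined above.
-#
-#   import pyparsing as pp
-#   pp.enable_diag(pp.Diagnostics.warn_on_parse_using_empty_Forward)
-#   pp.disable_diag(pp.Diagnostics.warn_on_parse_using_empty_Forward)
-#   pp.enable_all_warnings()   # turn on every "warn_*" diagnostic at once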
-
-
-# hide abstract class
-del __config_flags
-
-
-def _should_enable_warnings(
-    cmd_line_warn_options: IterableType[str], warn_env_var: OptionalType[str]
-) -> bool:
-    enable = bool(warn_env_var)
-    for warn_opt in cmd_line_warn_options:
-        w_action, w_message, w_category, w_module, w_line = (warn_opt + "::::").split(
-            ":"
-        )[:5]
-        if not w_action.lower().startswith("i") and (
-            not (w_message or w_category or w_module) or w_module == "pyparsing"
-        ):
-            enable = True
-        elif w_action.lower().startswith("i") and w_module in ("pyparsing", ""):
-            enable = False
-    return enable
-
-
-if _should_enable_warnings(
-    sys.warnoptions, os.environ.get("PYPARSINGENABLEALLWARNINGS")
-):
-    enable_all_warnings()
-
-
-# build list of single arg builtins, that can be used as parse actions
-_single_arg_builtins = {
-    sum,
-    len,
-    sorted,
-    reversed,
-    list,
-    tuple,
-    set,
-    any,
-    all,
-    min,
-    max,
-}
-
-_generatorType = types.GeneratorType
-ParseAction = Union[
-    Callable[[], Any],
-    Callable[[ParseResults], Any],
-    Callable[[int, ParseResults], Any],
-    Callable[[str, int, ParseResults], Any],
-]
-ParseCondition = Union[
-    Callable[[], bool],
-    Callable[[ParseResults], bool],
-    Callable[[int, ParseResults], bool],
-    Callable[[str, int, ParseResults], bool],
-]
-ParseFailAction = Callable[[str, int, "ParserElement", Exception], None]
-DebugStartAction = Callable[[str, int, "ParserElement", bool], None]
-DebugSuccessAction = Callable[
-    [str, int, int, "ParserElement", ParseResults, bool], None
-]
-DebugExceptionAction = Callable[[str, int, "ParserElement", Exception, bool], None]
-
-
-alphas = string.ascii_uppercase + string.ascii_lowercase
-identchars = pyparsing_unicode.Latin1.identchars
-identbodychars = pyparsing_unicode.Latin1.identbodychars
-nums = "0123456789"
-hexnums = nums + "ABCDEFabcdef"
-alphanums = alphas + nums
-printables = "".join([c for c in string.printable if c not in string.whitespace])
-
-_trim_arity_call_line: traceback.StackSummary = None
-
-
-def _trim_arity(func, max_limit=3):
-    """decorator to trim function calls to match the arity of the target"""
-    global _trim_arity_call_line
-
-    if func in _single_arg_builtins:
-        return lambda s, l, t: func(t)
-
-    limit = 0
-    found_arity = False
-
-    def extract_tb(tb, limit=0):
-        frames = traceback.extract_tb(tb, limit=limit)
-        frame_summary = frames[-1]
-        return [frame_summary[:2]]
-
-    # synthesize what would be returned by traceback.extract_stack at the call to
-    # user's parse action 'func', so that we don't incur call penalty at parse time
-
-    # fmt: off
-    LINE_DIFF = 7
-    # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND
-    # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
-    _trim_arity_call_line = (_trim_arity_call_line or traceback.extract_stack(limit=2)[-1])
-    pa_call_line_synth = (_trim_arity_call_line[0], _trim_arity_call_line[1] + LINE_DIFF)
-
-    def wrapper(*args):
-        nonlocal found_arity, limit
-        while 1:
-            try:
-                ret = func(*args[limit:])
-                found_arity = True
-                return ret
-            except TypeError as te:
-                # re-raise TypeErrors if they did not come from our arity testing
-                if found_arity:
-                    raise
-                else:
-                    tb = te.__traceback__
-                    trim_arity_type_error = (
-                        extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth
-                    )
-                    del tb
-
-                    if trim_arity_type_error:
-                        if limit < max_limit:
-                            limit += 1
-                            continue
-
-                    raise
-    # fmt: on
-
-    # copy func name to wrapper for sensible debug output
-    # (can't use functools.wraps, since that messes with function signature)
-    func_name = getattr(func, "__name__", getattr(func, "__class__").__name__)
-    wrapper.__name__ = func_name
-    wrapper.__doc__ = func.__doc__
-
-    return wrapper
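-
-
-# Conceptual sketch (illustrative): _trim_arity is what allows user parse
-# actions to be written with 0, 1, 2, or 3 arguments; assuming an expression
-# such as ``integer = Word(nums)``, all of these are acceptable:
-#
-#   integer.set_parse_action(lambda: None)                 # no args
-#   integer.set_parse_action(lambda toks: int(toks[0]))    # toks only
-#   integer.set_parse_action(lambda l, toks: toks)         # loc, toks
-#   integer.set_parse_action(lambda s, l, toks: toks)      # s, loc, toks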
-
-
-def condition_as_parse_action(
-    fn: ParseCondition, message: str = None, fatal: bool = False
-) -> ParseAction:
-    """
-    Function to convert a simple predicate function that returns ``True`` or ``False``
-    into a parse action. Can be used in places where a parse action is required
-    and :class:`ParserElement.add_condition` cannot be used (such as when adding a condition
-    to an operator level in :class:`infix_notation`).
-
-    Optional keyword arguments:
-
-    - ``message`` - define a custom message to be used in the raised exception
-    - ``fatal`` - if True, will raise :class:`ParseFatalException` to stop parsing immediately;
-      otherwise will raise :class:`ParseException`
-
-    """
-    msg = message if message is not None else "failed user-defined condition"
-    exc_type = ParseFatalException if fatal else ParseException
-    fn = _trim_arity(fn)
-
-    @wraps(fn)
-    def pa(s, l, t):
-        if not bool(fn(s, l, t)):
-            raise exc_type(s, l, msg)
-
-    return pa
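-
-
-# Usage sketch (illustrative, names are hypothetical):
-#
-#   is_even = condition_as_parse_action(
-#       lambda toks: int(toks[0]) % 2 == 0, message="expected an even number"
-#   )
-#   Word(nums).add_parse_action(is_even).parse_string("42")  # -> ['42']
-#   Word(nums).add_parse_action(is_even).parse_string("7")   # raises ParseException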
-
-
-def _default_start_debug_action(
-    instring: str, loc: int, expr: "ParserElement", cache_hit: bool = False
-):
-    cache_hit_str = "*" if cache_hit else ""
-    print(
-        (
-            "{}Match {} at loc {}({},{})\n  {}\n  {}^".format(
-                cache_hit_str,
-                expr,
-                loc,
-                lineno(loc, instring),
-                col(loc, instring),
-                line(loc, instring),
-                " " * (col(loc, instring) - 1),
-            )
-        )
-    )
-
-
-def _default_success_debug_action(
-    instring: str,
-    startloc: int,
-    endloc: int,
-    expr: "ParserElement",
-    toks: ParseResults,
-    cache_hit: bool = False,
-):
-    cache_hit_str = "*" if cache_hit else ""
-    print("{}Matched {} -> {}".format(cache_hit_str, expr, toks.as_list()))
-
-
-def _default_exception_debug_action(
-    instring: str,
-    loc: int,
-    expr: "ParserElement",
-    exc: Exception,
-    cache_hit: bool = False,
-):
-    cache_hit_str = "*" if cache_hit else ""
-    print(
-        "{}Match {} failed, {} raised: {}".format(
-            cache_hit_str, expr, type(exc).__name__, exc
-        )
-    )
-
-
-def null_debug_action(*args):
-    """'Do-nothing' debug action, to suppress debugging output during parsing."""
-
-
-class ParserElement(ABC):
-    """Abstract base level parser element class."""
-
-    DEFAULT_WHITE_CHARS: str = " \n\t\r"
-    verbose_stacktrace: bool = False
-    _literalStringClass: OptionalType[type] = None
-
-    @staticmethod
-    def set_default_whitespace_chars(chars: str) -> None:
-        r"""
-        Overrides the default whitespace chars
-
-        Example::
-
-            # default whitespace chars are space, <TAB> and newline
-            OneOrMore(Word(alphas)).parse_string("abc def\nghi jkl")  # -> ['abc', 'def', 'ghi', 'jkl']
-
-            # change to just treat newline as significant
-            ParserElement.set_default_whitespace_chars(" \t")
-            OneOrMore(Word(alphas)).parse_string("abc def\nghi jkl")  # -> ['abc', 'def']
-        """
-        ParserElement.DEFAULT_WHITE_CHARS = chars
-
-        # update whitespace for all parse expressions defined in this module
-        for expr in _builtin_exprs:
-            if expr.copyDefaultWhiteChars:
-                expr.whiteChars = set(chars)
-
-    @staticmethod
-    def inline_literals_using(cls: type) -> None:
-        """
-        Set class to be used for inclusion of string literals into a parser.
-
-        Example::
-
-            # default literal class used is Literal
-            integer = Word(nums)
-            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
-            date_str.parse_string("1999/12/31")  # -> ['1999', '/', '12', '/', '31']
-
-
-            # change to Suppress
-            ParserElement.inline_literals_using(Suppress)
-            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
-            date_str.parse_string("1999/12/31")  # -> ['1999', '12', '31']
-        """
-        ParserElement._literalStringClass = cls
-
-    class DebugActions(NamedTuple):
-        debug_try: OptionalType[DebugStartAction]
-        debug_match: OptionalType[DebugSuccessAction]
-        debug_fail: OptionalType[DebugExceptionAction]
-
-    def __init__(self, savelist: bool = False):
-        self.parseAction: List[ParseAction] = list()
-        self.failAction: OptionalType[ParseFailAction] = None
-        self.customName = None
-        self._defaultName = None
-        self.resultsName = None
-        self.saveAsList = savelist
-        self.skipWhitespace = True
-        self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS)
-        self.copyDefaultWhiteChars = True
-        # used when checking for left-recursion
-        self.mayReturnEmpty = False
-        self.keepTabs = False
-        self.ignoreExprs: List["ParserElement"] = list()
-        self.debug = False
-        self.streamlined = False
-        # optimize exception handling for subclasses that don't advance parse index
-        self.mayIndexError = True
-        self.errmsg = ""
-        # mark results names as modal (report only last) or cumulative (list all)
-        self.modalResults = True
-        # custom debug actions
-        self.debugActions = self.DebugActions(None, None, None)
-        # avoid redundant calls to preParse
-        self.callPreparse = True
-        self.callDuringTry = False
-        self.suppress_warnings_: List[Diagnostics] = []
-
-    def suppress_warning(self, warning_type: Diagnostics) -> "ParserElement":
-        """
-        Suppress warnings emitted for a particular diagnostic on this expression.
-
-        Example::
-
-            base = pp.Forward()
-            base.suppress_warning(Diagnostics.warn_on_parse_using_empty_Forward)
-
-            # statement would normally raise a warning, but is now suppressed
-            print(base.parseString("x"))
-
-        """
-        self.suppress_warnings_.append(warning_type)
-        return self
-
-    def copy(self) -> "ParserElement":
-        """
-        Make a copy of this :class:`ParserElement`.  Useful for defining
-        different parse actions for the same parsing pattern, using copies of
-        the original parse element.
-
-        Example::
-
-            integer = Word(nums).set_parse_action(lambda toks: int(toks[0]))
-            integerK = integer.copy().add_parse_action(lambda toks: toks[0] * 1024) + Suppress("K")
-            integerM = integer.copy().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M")
-
-            print(OneOrMore(integerK | integerM | integer).parse_string("5K 100 640K 256M"))
-
-        prints::
-
-            [5120, 100, 655360, 268435456]
-
-        Equivalent form of ``expr.copy()`` is just ``expr()``::
-
-            integerM = integer().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M")
-        """
-        cpy = copy.copy(self)
-        cpy.parseAction = self.parseAction[:]
-        cpy.ignoreExprs = self.ignoreExprs[:]
-        if self.copyDefaultWhiteChars:
-            cpy.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS)
-        return cpy
-
-    def set_results_name(
-        self, name: str, list_all_matches: bool = False, *, listAllMatches: bool = False
-    ) -> "ParserElement":
-        """
-        Define name for referencing matching tokens as a nested attribute
-        of the returned parse results.
-
-        Normally, results names are assigned as you would assign keys in a dict:
-        any existing value is overwritten by later values. If it is necessary to
-        keep all values captured for a particular results name, call ``set_results_name``
-        with ``list_all_matches`` = True.
-
-        NOTE: ``set_results_name`` returns a *copy* of the original :class:`ParserElement` object;
-        this is so that the client can define a basic element, such as an
-        integer, and reference it in multiple places with different names.
-
-        You can also set results names using the abbreviated syntax,
-        ``expr("name")`` in place of ``expr.set_results_name("name")``
-        - see :class:`__call__`. If ``list_all_matches`` is required, use
-        ``expr("name*")``.
-
-        Example::
-
-            date_str = (integer.set_results_name("year") + '/'
-                        + integer.set_results_name("month") + '/'
-                        + integer.set_results_name("day"))
-
-            # equivalent form:
-            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-        """
-        listAllMatches = listAllMatches or list_all_matches
-        return self._setResultsName(name, listAllMatches)
-
-    def _setResultsName(self, name, listAllMatches=False):
-        if name is None:
-            return self
-        newself = self.copy()
-        if name.endswith("*"):
-            name = name[:-1]
-            listAllMatches = True
-        newself.resultsName = name
-        newself.modalResults = not listAllMatches
-        return newself
-
-    def set_break(self, break_flag: bool = True) -> "ParserElement":
-        """
-        Method to invoke the Python pdb debugger when this element is
-        about to be parsed. Set ``break_flag`` to ``True`` to enable, ``False`` to
-        disable.
-        """
-        if break_flag:
-            _parseMethod = self._parse
-
-            def breaker(instring, loc, doActions=True, callPreParse=True):
-                import pdb
-
-                # this call to pdb.set_trace() is intentional, not a checkin error
-                pdb.set_trace()
-                return _parseMethod(instring, loc, doActions, callPreParse)
-
-            breaker._originalParseMethod = _parseMethod
-            self._parse = breaker
-        else:
-            if hasattr(self._parse, "_originalParseMethod"):
-                self._parse = self._parse._originalParseMethod
-        return self
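-
-    # Usage sketch (illustrative, ``suspect_expr`` is a hypothetical expression):
-    #
-    #   suspect_expr.set_break()        # drop into pdb just before this
-    #                                   # expression is about to be parsed
-    #   suspect_expr.set_break(False)   # remove the breakpoint again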
-
-    def set_parse_action(self, *fns: ParseAction, **kwargs) -> "ParserElement":
-        """
-        Define one or more actions to perform when successfully matching parse element definition.
-
-        Parse actions can be called to perform data conversions, do extra validation,
-        update external data structures, or enhance or replace the parsed tokens.
-        Each parse action ``fn`` is a callable method with 0-3 arguments, called as
-        ``fn(s, loc, toks)``, ``fn(loc, toks)``, ``fn(toks)``, or just ``fn()``, where:
-
-        - s   = the original string being parsed (see note below)
-        - loc = the location of the matching substring
-        - toks = a list of the matched tokens, packaged as a :class:`ParseResults` object
-
-        The parsed tokens are passed to the parse action as ParseResults. They can be
-        modified in place using list-style append, extend, and pop operations to update
-        the parsed list elements; and with dictionary-style item set and del operations
-        to add, update, or remove any named results. If the tokens are modified in place,
-        it is not necessary to return them with a return statement.
-
-        Parse actions can also completely replace the given tokens, with another ``ParseResults``
-        object, or with some entirely different object (common for parse actions that perform data
-        conversions). A convenient way to build a new parse result is to define the values
-        using a dict, and then create the return value using :class:`ParseResults.from_dict`.
-
-        If None is passed as the ``fn`` parse action, all previously added parse actions for this
-        expression are cleared.
-
-        Optional keyword arguments:
-
-        - call_during_try = (default= ``False``) indicate if parse action should be run during
-          lookaheads and alternate testing. For parse actions that have side effects, it is
-          important to only call the parse action once it is determined that it is being
-          called as part of a successful parse. For parse actions that perform additional
-          validation, ``call_during_try`` should be passed as ``True``, so that the validation
-          code is included in the preliminary "try" parses.
-
-        Note: the default parsing behavior is to expand tabs in the input string
-        before starting the parsing process.  See :class:`parse_string` for more
-        information on parsing strings containing ``<TAB>`` s, and suggested
-        methods to maintain a consistent view of the parsed string, the parse
-        location, and line and column positions within the parsed string.
-
-        Example::
-
-            # parse dates in the form YYYY/MM/DD
-
-            # use parse action to convert toks from str to int at parse time
-            def convert_to_int(toks):
-                return int(toks[0])
-
-            # use a parse action to verify that the date is a valid date
-            def is_valid_date(instring, loc, toks):
-                from datetime import date
-                year, month, day = toks[::2]
-                try:
-                    date(year, month, day)
-                except ValueError:
-                    raise ParseException(instring, loc, "invalid date given")
-
-            integer = Word(nums)
-            date_str = integer + '/' + integer + '/' + integer
-
-            # add parse actions
-            integer.set_parse_action(convert_to_int)
-            date_str.set_parse_action(is_valid_date)
-
-            # note that integer fields are now ints, not strings
-            date_str.run_tests('''
-                # successful parse - note that integer fields were converted to ints
-                1999/12/31
-
-                # fail - invalid date
-                1999/13/31
-                ''')
-        """
-        if list(fns) == [None]:
-            self.parseAction = []
-        else:
-            if not all(callable(fn) for fn in fns):
-                raise TypeError("parse actions must be callable")
-            self.parseAction = [_trim_arity(fn) for fn in fns]
-            self.callDuringTry = kwargs.get(
-                "call_during_try", kwargs.get("callDuringTry", False)
-            )
-        return self
-
-    def add_parse_action(self, *fns: ParseAction, **kwargs) -> "ParserElement":
-        """
-        Add one or more parse actions to expression's list of parse actions. See :class:`set_parse_action`.
-
-        See examples in :class:`copy`.
-        """
-        self.parseAction += [_trim_arity(fn) for fn in fns]
-        self.callDuringTry = self.callDuringTry or kwargs.get(
-            "call_during_try", kwargs.get("callDuringTry", False)
-        )
-        return self
-
-    def add_condition(self, *fns: ParseCondition, **kwargs) -> "ParserElement":
-        """Add a boolean predicate function to expression's list of parse actions. See
-        :class:`set_parse_action` for function call signatures. Unlike ``set_parse_action``,
-        functions passed to ``add_condition`` need to return boolean success/fail of the condition.
-
-        Optional keyword arguments:
-
-        - message = define a custom message to be used in the raised exception
-        - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise
-          ParseException
-        - call_during_try = boolean to indicate if this method should be called during internal tryParse calls,
-          default=False
-
-        Example::
-
-            integer = Word(nums).set_parse_action(lambda toks: int(toks[0]))
-            year_int = integer.copy()
-            year_int.add_condition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
-            date_str = year_int + '/' + integer + '/' + integer
-
-            result = date_str.parse_string("1999/12/31")  # -> Exception: Only support years 2000 and later (at char 0),
-                                                                         (line:1, col:1)
-        """
-        for fn in fns:
-            self.parseAction.append(
-                condition_as_parse_action(
-                    fn, message=kwargs.get("message"), fatal=kwargs.get("fatal", False)
-                )
-            )
-
-        self.callDuringTry = self.callDuringTry or kwargs.get(
-            "call_during_try", kwargs.get("callDuringTry", False)
-        )
-        return self
-
-    def set_fail_action(self, fn: ParseFailAction) -> "ParserElement":
-        """
-        Define action to perform if parsing fails at this expression.
-        Fail action ``fn`` is a callable function that takes the arguments
-        ``fn(s, loc, expr, err)`` where:
-
-        - s = string being parsed
-        - loc = location where expression match was attempted and failed
-        - expr = the parse expression that failed
-        - err = the exception thrown
-
-        The function returns no value.  It may throw :class:`ParseFatalException`
-        if it is desired to stop parsing immediately."""
-        self.failAction = fn
-        return self
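-
-    # Usage sketch (illustrative, ``keyword`` is a hypothetical expression):
-    #
-    #   def report_failure(s, loc, expr, err):
-    #       print("failed to match {} at line {}".format(expr, lineno(loc, s)))
-    #
-    #   keyword.set_fail_action(report_failure)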
-
-    def _skipIgnorables(self, instring, loc):
-        exprsFound = True
-        while exprsFound:
-            exprsFound = False
-            for e in self.ignoreExprs:
-                try:
-                    while 1:
-                        loc, dummy = e._parse(instring, loc)
-                        exprsFound = True
-                except ParseException:
-                    pass
-        return loc
-
-    def preParse(self, instring, loc):
-        if self.ignoreExprs:
-            loc = self._skipIgnorables(instring, loc)
-
-        if self.skipWhitespace:
-            instrlen = len(instring)
-            white_chars = self.whiteChars
-            while loc < instrlen and instring[loc] in white_chars:
-                loc += 1
-
-        return loc
-
-    def parseImpl(self, instring, loc, doActions=True):
-        return loc, []
-
-    def postParse(self, instring, loc, tokenlist):
-        return tokenlist
-
-    # @profile
-    def _parseNoCache(
-        self, instring, loc, doActions=True, callPreParse=True
-    ) -> Tuple[int, ParseResults]:
-        TRY, MATCH, FAIL = 0, 1, 2
-        debugging = self.debug  # and doActions
-        len_instring = len(instring)
-
-        if debugging or self.failAction:
-            # print("Match {} at loc {}({}, {})".format(self, loc, lineno(loc, instring), col(loc, instring)))
-            try:
-                if callPreParse and self.callPreparse:
-                    pre_loc = self.preParse(instring, loc)
-                else:
-                    pre_loc = loc
-                tokens_start = pre_loc
-                if self.debugActions.debug_try:
-                    self.debugActions.debug_try(instring, tokens_start, self, False)
-                if self.mayIndexError or pre_loc >= len_instring:
-                    try:
-                        loc, tokens = self.parseImpl(instring, pre_loc, doActions)
-                    except IndexError:
-                        raise ParseException(instring, len_instring, self.errmsg, self)
-                else:
-                    loc, tokens = self.parseImpl(instring, pre_loc, doActions)
-            except Exception as err:
-                # print("Exception raised:", err)
-                if self.debugActions.debug_fail:
-                    self.debugActions.debug_fail(
-                        instring, tokens_start, self, err, False
-                    )
-                if self.failAction:
-                    self.failAction(instring, tokens_start, self, err)
-                raise
-        else:
-            if callPreParse and self.callPreparse:
-                pre_loc = self.preParse(instring, loc)
-            else:
-                pre_loc = loc
-            tokens_start = pre_loc
-            if self.mayIndexError or pre_loc >= len_instring:
-                try:
-                    loc, tokens = self.parseImpl(instring, pre_loc, doActions)
-                except IndexError:
-                    raise ParseException(instring, len_instring, self.errmsg, self)
-            else:
-                loc, tokens = self.parseImpl(instring, pre_loc, doActions)
-
-        tokens = self.postParse(instring, loc, tokens)
-
-        ret_tokens = ParseResults(
-            tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults
-        )
-        if self.parseAction and (doActions or self.callDuringTry):
-            if debugging:
-                try:
-                    for fn in self.parseAction:
-                        try:
-                            tokens = fn(instring, tokens_start, ret_tokens)
-                        except IndexError as parse_action_exc:
-                            exc = ParseException("exception raised in parse action")
-                            raise exc from parse_action_exc
-
-                        if tokens is not None and tokens is not ret_tokens:
-                            ret_tokens = ParseResults(
-                                tokens,
-                                self.resultsName,
-                                asList=self.saveAsList
-                                and isinstance(tokens, (ParseResults, list)),
-                                modal=self.modalResults,
-                            )
-                except Exception as err:
-                    # print "Exception raised in user parse action:", err
-                    if self.debugActions.debug_fail:
-                        self.debugActions.debug_fail(
-                            instring, tokens_start, self, err, False
-                        )
-                    raise
-            else:
-                for fn in self.parseAction:
-                    try:
-                        tokens = fn(instring, tokens_start, ret_tokens)
-                    except IndexError as parse_action_exc:
-                        exc = ParseException("exception raised in parse action")
-                        raise exc from parse_action_exc
-
-                    if tokens is not None and tokens is not ret_tokens:
-                        ret_tokens = ParseResults(
-                            tokens,
-                            self.resultsName,
-                            asList=self.saveAsList
-                            and isinstance(tokens, (ParseResults, list)),
-                            modal=self.modalResults,
-                        )
-        if debugging:
-            # print("Matched", self, "->", ret_tokens.as_list())
-            if self.debugActions.debug_match:
-                self.debugActions.debug_match(
-                    instring, tokens_start, loc, self, ret_tokens, False
-                )
-
-        return loc, ret_tokens
-
-    def try_parse(self, instring: str, loc: int, raise_fatal: bool = False) -> int:
-        try:
-            return self._parse(instring, loc, doActions=False)[0]
-        except ParseFatalException:
-            if raise_fatal:
-                raise
-            raise ParseException(instring, loc, self.errmsg, self)
-
-    def can_parse_next(self, instring: str, loc: int) -> bool:
-        try:
-            self.try_parse(instring, loc)
-        except (ParseException, IndexError):
-            return False
-        else:
-            return True
-
-    # cache for left-recursion in Forward references
-    recursion_lock = RLock()
-    recursion_memos: DictType[
-        Tuple[int, "Forward", bool], Tuple[int, Union[ParseResults, Exception]]
-    ] = {}
-
-    # argument cache for optimizing repeated calls when backtracking through recursive expressions
-    packrat_cache = (
-        {}
-    )  # this is set later by enable_packrat(); this is here so that reset_cache() doesn't fail
-    packrat_cache_lock = RLock()
-    packrat_cache_stats = [0, 0]
-
-    # this method gets repeatedly called during backtracking with the same arguments -
-    # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
-    def _parseCache(
-        self, instring, loc, doActions=True, callPreParse=True
-    ) -> Tuple[int, ParseResults]:
-        HIT, MISS = 0, 1
-        TRY, MATCH, FAIL = 0, 1, 2
-        lookup = (self, instring, loc, callPreParse, doActions)
-        with ParserElement.packrat_cache_lock:
-            cache = ParserElement.packrat_cache
-            value = cache.get(lookup)
-            if value is cache.not_in_cache:
-                ParserElement.packrat_cache_stats[MISS] += 1
-                try:
-                    value = self._parseNoCache(instring, loc, doActions, callPreParse)
-                except ParseBaseException as pe:
-                    # cache a copy of the exception, without the traceback
-                    cache.set(lookup, pe.__class__(*pe.args))
-                    raise
-                else:
-                    cache.set(lookup, (value[0], value[1].copy(), loc))
-                    return value
-            else:
-                ParserElement.packrat_cache_stats[HIT] += 1
-                if self.debug and self.debugActions.debug_try:
-                    try:
-                        self.debugActions.debug_try(instring, loc, self, cache_hit=True)
-                    except TypeError:
-                        pass
-                if isinstance(value, Exception):
-                    if self.debug and self.debugActions.debug_fail:
-                        try:
-                            self.debugActions.debug_fail(
-                                instring, loc, self, value, cache_hit=True
-                            )
-                        except TypeError:
-                            pass
-                    raise value
-
-                loc_, result, endloc = value[0], value[1].copy(), value[2]
-                if self.debug and self.debugActions.debug_match:
-                    try:
-                        self.debugActions.debug_match(
-                            instring, loc_, endloc, self, result, cache_hit=True
-                        )
-                    except TypeError:
-                        pass
-
-                return loc_, result
-
-    _parse = _parseNoCache
-
-    @staticmethod
-    def reset_cache() -> None:
-        ParserElement.packrat_cache.clear()
-        ParserElement.packrat_cache_stats[:] = [0] * len(
-            ParserElement.packrat_cache_stats
-        )
-        ParserElement.recursion_memos.clear()
-
-    _packratEnabled = False
-    _left_recursion_enabled = False
-
-    @staticmethod
-    def disable_memoization() -> None:
-        """
-        Disables active Packrat or Left Recursion parsing and their memoization
-
-        This method also works if neither Packrat nor Left Recursion are enabled.
-        This makes it safe to call before activating Packrat or Left Recursion,
-        to clear any previous settings.
-        """
-        ParserElement.reset_cache()
-        ParserElement._left_recursion_enabled = False
-        ParserElement._packratEnabled = False
-        ParserElement._parse = ParserElement._parseNoCache
-
-    @staticmethod
-    def enable_left_recursion(
-        cache_size_limit: OptionalType[int] = None, *, force=False
-    ) -> None:
-        """
-        Enables "bounded recursion" parsing, which allows for both direct and indirect
-        left-recursion. During parsing, left-recursive :class:`Forward` elements are
-        repeatedly matched with a fixed recursion depth that is gradually increased
-        until finding the longest match.
-
-        Example::
-
-            import pyparsing as pp
-            pp.ParserElement.enable_left_recursion()
-
-            E = pp.Forward("E")
-            num = pp.Word(pp.nums)
-            # match `num`, or `num '+' num`, or `num '+' num '+' num`, ...
-            E <<= E + '+' - num | num
-
-            print(E.parse_string("1+2+3"))
-
-        Recursion search naturally memoizes matches of ``Forward`` elements and may
-        thus skip reevaluation of parse actions during backtracking. This may break
-        programs with parse actions which rely on strict ordering of side-effects.
-
-        Parameters:
-
-        - cache_size_limit - (default=``None``) - memoize at most this many
-          ``Forward`` elements during matching; if ``None`` (the default),
-          memoize all ``Forward`` elements.
-
-        Bounded Recursion parsing works similarly, but not identically, to Packrat
-        parsing, thus the two cannot be used together. Use ``force=True`` to disable any
-        previous, conflicting settings.
-        """
-        if force:
-            ParserElement.disable_memoization()
-        elif ParserElement._packratEnabled:
-            raise RuntimeError("Packrat and Bounded Recursion are not compatible")
-        if cache_size_limit is None:
-            ParserElement.recursion_memos = _UnboundedMemo()
-        elif cache_size_limit > 0:
-            ParserElement.recursion_memos = _LRUMemo(capacity=cache_size_limit)
-        else:
-            raise NotImplementedError("Memo size of %s" % cache_size_limit)
-        ParserElement._left_recursion_enabled = True
-
-    @staticmethod
-    def enable_packrat(cache_size_limit: int = 128, *, force: bool = False) -> None:
-        """
-        Enables "packrat" parsing, which adds memoizing to the parsing logic.
-        Repeated parse attempts at the same string location (which happens
-        often in many complex grammars) can immediately return a cached value,
-        instead of re-executing parsing/validating code.  Memoizing is done for
-        both valid results and parsing exceptions.
-
-        Parameters:
-
-        - cache_size_limit - (default= ``128``) - if an integer value is provided
-          will limit the size of the packrat cache; if None is passed, then
-          the cache size will be unbounded; if 0 is passed, the cache will
-          be effectively disabled.
-
-        This speedup may break existing programs that use parse actions that
-        have side-effects.  For this reason, packrat parsing is disabled when
-        you first import pyparsing.  To activate the packrat feature, your
-        program must call the class method :class:`ParserElement.enable_packrat`.
-        For best results, call ``enable_packrat()`` immediately after
-        importing pyparsing.
-
-        Example::
-
-            import pyparsing
-            pyparsing.ParserElement.enable_packrat()
-
-        Packrat parsing works similarly, but not identically, to Bounded Recursion
-        parsing, thus the two cannot be used together. Use ``force=True`` to disable any
-        previous, conflicting settings.
-        """
-        if force:
-            ParserElement.disable_memoization()
-        elif ParserElement._left_recursion_enabled:
-            raise RuntimeError("Packrat and Bounded Recursion are not compatible")
-        if not ParserElement._packratEnabled:
-            ParserElement._packratEnabled = True
-            if cache_size_limit is None:
-                ParserElement.packrat_cache = _UnboundedCache()
-            else:
-                ParserElement.packrat_cache = _FifoCache(cache_size_limit)
-            ParserElement._parse = ParserElement._parseCache
-
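-    # Hedged sketch of the ``cache_size_limit`` options documented above:
-    #
-    #     import pyparsing as pp
-    #     pp.ParserElement.enable_packrat(None)   # unbounded cache
-    #     # pp.ParserElement.enable_packrat(0)    # cache effectively disabled
-    #     # pp.ParserElement.enable_packrat(256)  # FIFO cache of 256 entries
-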
-    def parse_string(
-        self, instring: str, parse_all: bool = False, *, parseAll: bool = False
-    ) -> ParseResults:
-        """
-        Parse a string with respect to the parser definition. This function is intended as the primary interface to the
-        client code.
-
-        :param instring: The input string to be parsed.
-        :param parse_all: If set, the entire input string must match the grammar.
-        :param parseAll: retained for pre-PEP8 compatibility, will be removed in a future release.
-        :raises ParseException: Raised if ``parse_all`` is set and the input string does not match the whole grammar.
-        :returns: the parsed data as a :class:`ParseResults` object, which may be accessed as a `list`, a `dict`, or
-          an object with attributes if the given parser includes results names.
-
-        If the input string is required to match the entire grammar, the ``parse_all`` flag must be set to
-        ``True``. This is equivalent to ending the grammar with :class:`StringEnd`().
-
-        To report proper column numbers, ``parse_string`` operates on a copy of the input string where all tabs are
-        converted to spaces (8 spaces per tab, as per the default in ``str.expandtabs``). If the input string
-        contains tabs and the grammar uses parse actions that use the ``loc`` argument to index into the string
-        being parsed, one can ensure a consistent view of the input string by doing one of the following:
-
-        - call ``parse_with_tabs`` on your grammar before calling ``parse_string`` (see :class:`parse_with_tabs`),
-        - define your parse action using the full ``(s, loc, toks)`` signature, and reference the input string
-          using the parse action's ``s`` argument, or
-        - explicitly expand the tabs in your input string before calling ``parse_string``.
-
-        Examples:
-
-        By default, partial matches are OK.
-
-        >>> res = Word('a').parse_string('aaaaabaaa')
-        >>> print(res)
-        ['aaaaa']
-
-        The parsing behavior varies by the inheriting class of this abstract class. Please refer to the child
-        classes directly for more examples.
-
-        An exception is raised if the ``parse_all`` flag is set and ``instring`` does not match the whole grammar.
-
-        >>> res = Word('a').parse_string('aaaaabaaa', parse_all=True)
-        Traceback (most recent call last):
-        ...
-        pyparsing.ParseException: Expected end of text, found 'b'  (at char 5), (line:1, col:6)
-        """
-        parseAll = parse_all or parseAll
-
-        ParserElement.reset_cache()
-        if not self.streamlined:
-            self.streamline()
-        for e in self.ignoreExprs:
-            e.streamline()
-        if not self.keepTabs:
-            instring = instring.expandtabs()
-        try:
-            loc, tokens = self._parse(instring, 0)
-            if parseAll:
-                loc = self.preParse(instring, loc)
-                se = Empty() + StringEnd()
-                se._parse(instring, loc)
-        except ParseBaseException as exc:
-            if ParserElement.verbose_stacktrace:
-                raise
-            else:
-                # catch and re-raise exception from here, clearing out pyparsing internal stack trace
-                raise exc.with_traceback(None)
-        else:
-            return tokens
-
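-    # Illustrative sketch of the tab-handling guidance in the docstring above
-    # (grammar and input are made up for the example):
-    #
-    #     import pyparsing as pp
-    #     expr = pp.Word(pp.alphas)
-    #     expr.parse_with_tabs()          # keep <TAB>s, so parse-action ``loc`` values
-    #     expr.parse_string("abc\tdef")   # index into the original, unexpanded string
-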
-    def scan_string(
-        self,
-        instring: str,
-        max_matches: int = _MAX_INT,
-        overlap: bool = False,
-        *,
-        debug: bool = False,
-        maxMatches: int = _MAX_INT,
-    ) -> Generator[Tuple[ParseResults, int, int], None, None]:
-        """
-        Scan the input string for expression matches.  Each match will return the
-        matching tokens, start location, and end location.  May be called with the optional
-        ``max_matches`` argument to clip scanning after 'n' matches are found.  If
-        ``overlap`` is specified, then overlapping matches will be reported.
-
-        Note that the start and end locations are reported relative to the string
-        being parsed.  See :class:`parse_string` for more information on parsing
-        strings with embedded tabs.
-
-        Example::
-
-            source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
-            print(source)
-            for tokens, start, end in Word(alphas).scan_string(source):
-                print(' '*start + '^'*(end-start))
-                print(' '*start + tokens[0])
-
-        prints::
-
-            sldjf123lsdjjkf345sldkjf879lkjsfd987
-            ^^^^^
-            sldjf
-                    ^^^^^^^
-                    lsdjjkf
-                              ^^^^^^
-                              sldkjf
-                                       ^^^^^^
-                                       lkjsfd
-        """
-        maxMatches = min(maxMatches, max_matches)
-        if not self.streamlined:
-            self.streamline()
-        for e in self.ignoreExprs:
-            e.streamline()
-
-        if not self.keepTabs:
-            instring = str(instring).expandtabs()
-        instrlen = len(instring)
-        loc = 0
-        preparseFn = self.preParse
-        parseFn = self._parse
-        ParserElement.reset_cache()
-        matches = 0
-        try:
-            while loc <= instrlen and matches < maxMatches:
-                try:
-                    preloc = preparseFn(instring, loc)
-                    nextLoc, tokens = parseFn(instring, preloc, callPreParse=False)
-                except ParseException:
-                    loc = preloc + 1
-                else:
-                    if nextLoc > loc:
-                        matches += 1
-                        if debug:
-                            print(
-                                {
-                                    "tokens": tokens.asList(),
-                                    "start": preloc,
-                                    "end": nextLoc,
-                                }
-                            )
-                        yield tokens, preloc, nextLoc
-                        if overlap:
-                            nextloc = preparseFn(instring, loc)
-                            if nextloc > loc:
-                                loc = nextLoc
-                            else:
-                                loc += 1
-                        else:
-                            loc = nextLoc
-                    else:
-                        loc = preloc + 1
-        except ParseBaseException as exc:
-            if ParserElement.verbose_stacktrace:
-                raise
-            else:
-                # catch and re-raise exception from here, clears out pyparsing internal stack trace
-                raise exc.with_traceback(None)
-
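-    # Hedged sketch of the ``overlap`` flag; expected results are reasoned from the
-    # loop logic above, not taken from the pyparsing test suite:
-    #
-    #     import pyparsing as pp
-    #     pair = pp.Word(pp.nums) + "," + pp.Word(pp.nums)
-    #     [t.as_list() for t, s, e in pair.scan_string("1,2,3")]
-    #     # -> [['1', ',', '2']]
-    #     [t.as_list() for t, s, e in pair.scan_string("1,2,3", overlap=True)]
-    #     # -> [['1', ',', '2'], ['2', ',', '3']]
-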
-    def transform_string(self, instring: str, *, debug: bool = False) -> str:
-        """
-        Extension to :class:`scan_string`, to modify matching text with modified tokens that may
-        be returned from a parse action.  To use ``transform_string``, define a grammar and
-        attach a parse action to it that modifies the returned token list.
-        Invoking ``transform_string()`` on a target string will then scan for matches,
-        and replace the matched text patterns according to the logic in the parse
-        action.  ``transform_string()`` returns the resulting transformed string.
-
-        Example::
-
-            wd = Word(alphas)
-            wd.set_parse_action(lambda toks: toks[0].title())
-
-            print(wd.transform_string("now is the winter of our discontent made glorious summer by this sun of york."))
-
-        prints::
-
-            Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
-        """
-        out: List[str] = []
-        lastE = 0
-        # force preservation of <TAB>s, to minimize unwanted transformation of string, and to
-        # keep string locs straight between transform_string and scan_string
-        self.keepTabs = True
-        try:
-            for t, s, e in self.scan_string(instring, debug=debug):
-                out.append(instring[lastE:s])
-                if t:
-                    if isinstance(t, ParseResults):
-                        out += t.as_list()
-                    elif isinstance(t, Iterable) and not isinstance(t, str_type):
-                        out.extend(t)
-                    else:
-                        out.append(t)
-                lastE = e
-            out.append(instring[lastE:])
-            out = [o for o in out if o]
-            return "".join([str(s) for s in _flatten(out)])
-        except ParseBaseException as exc:
-            if ParserElement.verbose_stacktrace:
-                raise
-            else:
-                # catch and re-raise exception from here, clears out pyparsing internal stack trace
-                raise exc.with_traceback(None)
-
-    def search_string(
-        self,
-        instring: str,
-        max_matches: int = _MAX_INT,
-        *,
-        debug: bool = False,
-        maxMatches: int = _MAX_INT,
-    ) -> ParseResults:
-        """
-        Another extension to :class:`scan_string`, simplifying access to the tokens found
-        to match the given parse expression.  May be called with the optional
-        ``max_matches`` argument to clip searching after 'n' matches are found.
-
-        Example::
-
-            # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
-            cap_word = Word(alphas.upper(), alphas.lower())
-
-            print(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity"))
-
-            # the sum() builtin can be used to merge results into a single ParseResults object
-            print(sum(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity")))
-
-        prints::
-
-            [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
-            ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
-        """
-        maxMatches = min(maxMatches, max_matches)
-        try:
-            return ParseResults(
-                [t for t, s, e in self.scan_string(instring, maxMatches, debug=debug)]
-            )
-        except ParseBaseException as exc:
-            if ParserElement.verbose_stacktrace:
-                raise
-            else:
-                # catch and re-raise exception from here, clears out pyparsing internal stack trace
-                raise exc.with_traceback(None)
-
-    def split(
-        self,
-        instring: str,
-        maxsplit: int = _MAX_INT,
-        include_separators: bool = False,
-        *,
-        includeSeparators=False,
-    ) -> Generator[str, None, None]:
-        """
-        Generator method to split a string using the given expression as a separator.
-        May be called with the optional ``maxsplit`` argument to limit the number of splits,
-        and with the optional ``include_separators`` argument (default= ``False``) to include
-        the separating matched text in the split results.
-
-        Example::
-
-            punc = one_of(list(".,;:/-!?"))
-            print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
-
-        prints::
-
-            ['This', ' this', '', ' this sentence', ' is badly punctuated', '']
-        """
-        includeSeparators = includeSeparators or include_separators
-        last = 0
-        for t, s, e in self.scan_string(instring, max_matches=maxsplit):
-            yield instring[last:s]
-            if includeSeparators:
-                yield t[0]
-            last = e
-        yield instring[last:]
-
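-    # Quick sketch of ``include_separators=True`` (illustrative input):
-    #
-    #     import pyparsing as pp
-    #     punc = pp.one_of(list(".,;:/-!?"))
-    #     list(punc.split("a,b.c", include_separators=True))
-    #     # -> ['a', ',', 'b', '.', 'c']
-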
-    def __add__(self, other) -> "ParserElement":
-        """
-        Implementation of ``+`` operator - returns :class:`And`. Adding strings to a :class:`ParserElement`
-        converts them to :class:`Literal`s by default.
-
-        Example::
-
-            greet = Word(alphas) + "," + Word(alphas) + "!"
-            hello = "Hello, World!"
-            print(hello, "->", greet.parse_string(hello))
-
-        prints::
-
-            Hello, World! -> ['Hello', ',', 'World', '!']
-
-        ``...`` may be used as a parse expression as a short form of :class:`SkipTo`::
-
-            Literal('start') + ... + Literal('end')
-
-        is equivalent to::
-
-            Literal('start') + SkipTo('end')("_skipped*") + Literal('end')
-
-        Note that the skipped text is returned with '_skipped' as a results name,
-        and to support having multiple skips in the same parser, the value returned is
-        a list of all skipped text.
-        """
-        if other is Ellipsis:
-            return _PendingSkip(self)
-
-        if isinstance(other, str_type):
-            other = self._literalStringClass(other)
-        if not isinstance(other, ParserElement):
-            raise TypeError(
-                "Cannot combine element of type {} with ParserElement".format(
-                    type(other).__name__
-                )
-            )
-        return And([self, other])
-
-    def __radd__(self, other) -> "ParserElement":
-        """
-        Implementation of ``+`` operator when left operand is not a :class:`ParserElement`
-        """
-        if other is Ellipsis:
-            return SkipTo(self)("_skipped*") + self
-
-        if isinstance(other, str_type):
-            other = self._literalStringClass(other)
-        if not isinstance(other, ParserElement):
-            raise TypeError(
-                "Cannot combine element of type {} with ParserElement".format(
-                    type(other).__name__
-                )
-            )
-        return other + self
-
-    def __sub__(self, other) -> "ParserElement":
-        """
-        Implementation of ``-`` operator, returns :class:`And` with error stop
-        """
-        if isinstance(other, str_type):
-            other = self._literalStringClass(other)
-        if not isinstance(other, ParserElement):
-            raise TypeError(
-                "Cannot combine element of type {} with ParserElement".format(
-                    type(other).__name__
-                )
-            )
-        return self + And._ErrorStop() + other
-
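-    # Hedged sketch of the error stop created by ``-`` (names are illustrative):
-    # once everything left of a ``-`` has matched, a failure to its right raises a
-    # fatal exception instead of backtracking into other alternatives:
-    #
-    #     import pyparsing as pp
-    #     date = pp.Word(pp.nums) + "/" - pp.Word(pp.nums) + "/" - pp.Word(pp.nums)
-    #     date.parse_string("1999/12/")   # reports the missing final field at its
-    #                                     # actual location, not a generic failure
-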
-    def __rsub__(self, other) -> "ParserElement":
-        """
-        Implementation of ``-`` operator when left operand is not a :class:`ParserElement`
-        """
-        if isinstance(other, str_type):
-            other = self._literalStringClass(other)
-        if not isinstance(other, ParserElement):
-            raise TypeError(
-                "Cannot combine element of type {} with ParserElement".format(
-                    type(other).__name__
-                )
-            )
-        return other - self
-
-    def __mul__(self, other) -> "ParserElement":
-        """
-        Implementation of ``*`` operator, allows use of ``expr * 3`` in place of
-        ``expr + expr + expr``.  Expressions may also be multiplied by a 2-integer
-        tuple, similar to ``{min, max}`` multipliers in regular expressions.  Tuples
-        may also include ``None`` as in:
-        - ``expr*(n, None)`` or ``expr*(n, )`` are equivalent
-             to ``expr*n + ZeroOrMore(expr)``
-             (read as "at least n instances of ``expr``")
-        - ``expr*(None, n)`` is equivalent to ``expr*(0, n)``
-             (read as "0 to n instances of ``expr``")
-        - ``expr*(None, None)`` is equivalent to ``ZeroOrMore(expr)``
-        - ``expr*(1, None)`` is equivalent to ``OneOrMore(expr)``
-
-        Note that ``expr*(None, n)`` does not raise an exception if
-        more than n exprs exist in the input stream; that is,
-        ``expr*(None, n)`` does not enforce a maximum number of expr
-        occurrences.  If this behavior is desired, then write
-        ``expr*(None, n) + ~expr``
-        """
-        if other is Ellipsis:
-            other = (0, None)
-        elif isinstance(other, tuple) and other[:1] == (Ellipsis,):
-            other = ((0,) + other[1:] + (None,))[:2]
-
-        if isinstance(other, int):
-            minElements, optElements = other, 0
-        elif isinstance(other, tuple):
-            other = tuple(o if o is not Ellipsis else None for o in other)
-            other = (other + (None, None))[:2]
-            if other[0] is None:
-                other = (0, other[1])
-            if isinstance(other[0], int) and other[1] is None:
-                if other[0] == 0:
-                    return ZeroOrMore(self)
-                if other[0] == 1:
-                    return OneOrMore(self)
-                else:
-                    return self * other[0] + ZeroOrMore(self)
-            elif isinstance(other[0], int) and isinstance(other[1], int):
-                minElements, optElements = other
-                optElements -= minElements
-            else:
-                raise TypeError(
-                    "cannot multiply ParserElement and ({}) objects".format(
-                        ",".join(type(item).__name__ for item in other)
-                    )
-                )
-        else:
-            raise TypeError(
-                "cannot multiply ParserElement and {} objects".format(
-                    type(other).__name__
-                )
-            )
-
-        if minElements < 0:
-            raise ValueError("cannot multiply ParserElement by negative value")
-        if optElements < 0:
-            raise ValueError(
-                "second tuple value must be greater or equal to first tuple value"
-            )
-        if minElements == optElements == 0:
-            return And([])
-
-        if optElements:
-
-            def makeOptionalList(n):
-                if n > 1:
-                    return Opt(self + makeOptionalList(n - 1))
-                else:
-                    return Opt(self)
-
-            if minElements:
-                if minElements == 1:
-                    ret = self + makeOptionalList(optElements)
-                else:
-                    ret = And([self] * minElements) + makeOptionalList(optElements)
-            else:
-                ret = makeOptionalList(optElements)
-        else:
-            if minElements == 1:
-                ret = self
-            else:
-                ret = And([self] * minElements)
-        return ret
-
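-    # Sketch of the multiplication forms described above (expressions illustrative):
-    #
-    #     import pyparsing as pp
-    #     num = pp.Word(pp.nums)
-    #     num * 3           # exactly three: num + num + num
-    #     num * (2, 4)      # two to four occurrences
-    #     num * (2, None)   # at least two: num * 2 + pp.ZeroOrMore(num)
-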
-    def __rmul__(self, other) -> "ParserElement":
-        return self.__mul__(other)
-
-    def __or__(self, other) -> "ParserElement":
-        """
-        Implementation of ``|`` operator - returns :class:`MatchFirst`
-        """
-        if other is Ellipsis:
-            return _PendingSkip(self, must_skip=True)
-
-        if isinstance(other, str_type):
-            other = self._literalStringClass(other)
-        if not isinstance(other, ParserElement):
-            raise TypeError(
-                "Cannot combine element of type {} with ParserElement".format(
-                    type(other).__name__
-                )
-            )
-        return MatchFirst([self, other])
-
-    def __ror__(self, other) -> "ParserElement":
-        """
-        Implementation of ``|`` operator when left operand is not a :class:`ParserElement`
-        """
-        if isinstance(other, str_type):
-            other = self._literalStringClass(other)
-        if not isinstance(other, ParserElement):
-            raise TypeError(
-                "Cannot combine element of type {} with ParserElement".format(
-                    type(other).__name__
-                )
-            )
-        return other | self
-
-    def __xor__(self, other) -> "ParserElement":
-        """
-        Implementation of ``^`` operator - returns :class:`Or`
-        """
-        if isinstance(other, str_type):
-            other = self._literalStringClass(other)
-        if not isinstance(other, ParserElement):
-            raise TypeError(
-                "Cannot combine element of type {} with ParserElement".format(
-                    type(other).__name__
-                )
-            )
-        return Or([self, other])
-
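-    # Hedged sketch contrasting ``|`` (MatchFirst: first alternative that matches)
-    # with ``^`` (Or: longest match); results reasoned from those semantics:
-    #
-    #     import pyparsing as pp
-    #     num = pp.Word(pp.nums)
-    #     versioned = num + ("." + num)[1, ...]
-    #     (num | versioned).parse_string("1.2.3")   # -> ['1'] (first match wins)
-    #     (num ^ versioned).parse_string("1.2.3")   # -> ['1', '.', '2', '.', '3']
-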
-    def __rxor__(self, other) -> "ParserElement":
-        """
-        Implementation of ``^`` operator when left operand is not a :class:`ParserElement`
-        """
-        if isinstance(other, str_type):
-            other = self._literalStringClass(other)
-        if not isinstance(other, ParserElement):
-            raise TypeError(
-                "Cannot combine element of type {} with ParserElement".format(
-                    type(other).__name__
-                )
-            )
-        return other ^ self
-
-    def __and__(self, other) -> "ParserElement":
-        """
-        Implementation of ``&`` operator - returns :class:`Each`
-        """
-        if isinstance(other, str_type):
-            other = self._literalStringClass(other)
-        if not isinstance(other, ParserElement):
-            raise TypeError(
-                "Cannot combine element of type {} with ParserElement".format(
-                    type(other).__name__
-                )
-            )
-        return Each([self, other])
-
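-    # Illustrative sketch of ``&``: all expressions must be present, in any order;
-    # tokens are returned in the order found (per :class:`Each` semantics):
-    #
-    #     import pyparsing as pp
-    #     color = pp.Keyword("red") | pp.Keyword("blue")
-    #     size = pp.Keyword("small") | pp.Keyword("large")
-    #     (color & size).parse_string("large red")   # -> ['large', 'red']
-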
-    def __rand__(self, other) -> "ParserElement":
-        """
-        Implementation of ``&`` operator when left operand is not a :class:`ParserElement`
-        """
-        if isinstance(other, str_type):
-            other = self._literalStringClass(other)
-        if not isinstance(other, ParserElement):
-            raise TypeError(
-                "Cannot combine element of type {} with ParserElement".format(
-                    type(other).__name__
-                )
-            )
-        return other & self
-
-    def __invert__(self) -> "ParserElement":
-        """
-        Implementation of ``~`` operator - returns :class:`NotAny`
-        """
-        return NotAny(self)
-
-    # disable __iter__ to override legacy use of sequential access to __getitem__ to
-    # iterate over a sequence
-    __iter__ = None
-
-    def __getitem__(self, key):
-        """
-        Use ``[]`` indexing notation as a short form for expression repetition:
-
-        - ``expr[n]`` is equivalent to ``expr*n``
-        - ``expr[m, n]`` is equivalent to ``expr*(m, n)``
-        - ``expr[n, ...]`` or ``expr[n,]`` is equivalent
-             to ``expr*n + ZeroOrMore(expr)``
-             (read as "at least n instances of ``expr``")
-        - ``expr[..., n]`` is equivalent to ``expr*(0, n)``
-             (read as "0 to n instances of ``expr``")
-        - ``expr[...]`` and ``expr[0, ...]`` are equivalent to ``ZeroOrMore(expr)``
-        - ``expr[1, ...]`` is equivalent to ``OneOrMore(expr)``
-
-        ``None`` may be used in place of ``...``.
-
-        Note that ``expr[..., n]`` and ``expr[m, n]`` do not raise an exception
-        if more than ``n`` ``expr``s exist in the input stream.  If this behavior is
-        desired, then write ``expr[..., n] + ~expr``.
-        """
-
-        # convert single arg keys to tuples
-        try:
-            if isinstance(key, str_type):
-                key = (key,)
-            iter(key)
-        except TypeError:
-            key = (key, key)
-
-        if len(key) > 2:
-            raise TypeError(
-                "only 1 or 2 index arguments supported ({}{})".format(
-                    key[:5], "... [{}]".format(len(key)) if len(key) > 5 else ""
-                )
-            )
-
-        # clip to 2 elements
-        ret = self * tuple(key[:2])
-        return ret
-
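-    # Quick sketch of the ``[]`` repetition forms listed above:
-    #
-    #     import pyparsing as pp
-    #     num = pp.Word(pp.nums)
-    #     num[3]        # exactly three
-    #     num[1, ...]   # one or more, like pp.OneOrMore(num)
-    #     num[..., 5]   # zero to five (no upper-bound enforcement, per the note)
-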
-    def __call__(self, name: OptionalType[str] = None) -> "ParserElement":
-        """
-        Shortcut for :class:`set_results_name`, with ``list_all_matches=False``.
-
-        If ``name`` is given with a trailing ``'*'`` character, then ``list_all_matches`` will be
-        passed as ``True``.
-
-        If ``name`` is omitted, this is the same as calling :class:`copy`.
-
-        Example::
-
-            # these are equivalent
-            userdata = Word(alphas).set_results_name("name") + Word(nums + "-").set_results_name("socsecno")
-            userdata = Word(alphas)("name") + Word(nums + "-")("socsecno")
-        """
-        if name is not None:
-            return self._setResultsName(name)
-        else:
-            return self.copy()
-
-    def suppress(self) -> "ParserElement":
-        """
-        Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from
-        cluttering up returned output.
-        """
-        return Suppress(self)
-
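-    # Illustrative use of ``suppress`` to drop punctuation from the results:
-    #
-    #     import pyparsing as pp
-    #     wd = pp.Word(pp.alphas)
-    #     expr = pp.Literal("(").suppress() + wd + pp.Literal(")").suppress()
-    #     expr.parse_string("(hello)")   # -> ['hello']
-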
-    def ignore_whitespace(self, recursive: bool = True) -> "ParserElement":
-        """
-        Enables the skipping of whitespace before matching the characters in the
-        :class:`ParserElement`'s defined pattern.
-
-        :param recursive: If ``True`` (the default), also enable whitespace skipping in child elements (if any)
-        """
-        self.skipWhitespace = True
-        return self
-
-    def leave_whitespace(self, recursive: bool = True) -> "ParserElement":
-        """
-        Disables the skipping of whitespace before matching the characters in the
-        :class:`ParserElement`'s defined pattern.  This is normally only used internally by
-        the pyparsing module, but may be needed in some whitespace-sensitive grammars.
-
-        :param recursive: If ``True`` (the default), also disable whitespace skipping in child elements (if any)
-        """
-        self.skipWhitespace = False
-        return self
-
-    def set_whitespace_chars(
-        self, chars: Union[Set[str], str], copy_defaults: bool = False
-    ) -> "ParserElement":
-        """
-        Overrides the default whitespace chars
-        """
-        self.skipWhitespace = True
-        self.whiteChars = set(chars)
-        self.copyDefaultWhiteChars = copy_defaults
-        return self
-
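-    # Hedged sketch: restricting skipped whitespace to spaces and tabs, so newlines
-    # become significant for this expression (a common use of this method):
-    #
-    #     import pyparsing as pp
-    #     item = pp.Word(pp.alphas).set_whitespace_chars(" \t")
-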
-    def parse_with_tabs(self) -> "ParserElement":
-        """
-        Overrides default behavior to expand ``<TAB>`` s to spaces before parsing the input string.
-        Must be called before ``parse_string`` when the input grammar contains elements that
-        match ``<TAB>`` characters.
-        """
-        self.keepTabs = True
-        return self
-
-    def ignore(self, other: "ParserElement") -> "ParserElement":
-        """
-        Define an expression to be ignored (e.g., comments) while doing pattern
-        matching; may be called repeatedly to define multiple comment or other
-        ignorable patterns.
-
-        Example::
-
-            patt = OneOrMore(Word(alphas))
-            patt.parse_string('ablaj /* comment */ lskjd')
-            # -> ['ablaj']
-
-            patt.ignore(c_style_comment)
-            patt.parse_string('ablaj /* comment */ lskjd')
-            # -> ['ablaj', 'lskjd']
-        """
-        if isinstance(other, str_type):
-            other = Suppress(other)
-
-        if isinstance(other, Suppress):
-            if other not in self.ignoreExprs:
-                self.ignoreExprs.append(other)
-        else:
-            self.ignoreExprs.append(Suppress(other.copy()))
-        return self
-
-    def set_debug_actions(
-        self,
-        start_action: DebugStartAction,
-        success_action: DebugSuccessAction,
-        exception_action: DebugExceptionAction,
-    ) -> "ParserElement":
-        """
-        Customize display of debugging messages while doing pattern matching:
-
-        - ``start_action`` - method to be called when an expression is about to be parsed;
-          should have the signature ``fn(input_string: str, location: int, expression: ParserElement, cache_hit: bool)``
-
-        - ``success_action`` - method to be called when an expression has successfully parsed;
-          should have the signature ``fn(input_string: str, start_location: int, end_location: int, expression: ParserElement, parsed_tokens: ParseResults, cache_hit: bool)``
-
-        - ``exception_action`` - method to be called when expression fails to parse;
-          should have the signature ``fn(input_string: str, location: int, expression: ParserElement, exception: Exception, cache_hit: bool)``
-        """
-        self.debugActions = self.DebugActions(
-            start_action or _default_start_debug_action,
-            success_action or _default_success_debug_action,
-            exception_action or _default_exception_debug_action,
-        )
-        self.debug = True
-        return self
-
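-    # Minimal sketch of a custom start action matching the documented signature
-    # (``show_try`` is an illustrative name; ``expr`` is any ParserElement; passing
-    # None for the other actions falls back to the defaults, per the code above):
-    #
-    #     def show_try(input_string, location, expression, cache_hit):
-    #         print("trying {} at {}".format(expression, location))
-    #
-    #     expr.set_debug_actions(show_try, None, None)
-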
-    def set_debug(self, flag: bool = True) -> "ParserElement":
-        """
-        Enable display of debugging messages while doing pattern matching.
-        Set ``flag`` to ``True`` to enable, ``False`` to disable.
-
-        Example::
-
-            wd = Word(alphas).set_name("alphaword")
-            integer = Word(nums).set_name("numword")
-            term = wd | integer
-
-            # turn on debugging for wd
-            wd.set_debug()
-
-            OneOrMore(term).parse_string("abc 123 xyz 890")
-
-        prints::
-
-            Match alphaword at loc 0(1,1)
-            Matched alphaword -> ['abc']
-            Match alphaword at loc 3(1,4)
-            Exception raised:Expected alphaword (at char 4), (line:1, col:5)
-            Match alphaword at loc 7(1,8)
-            Matched alphaword -> ['xyz']
-            Match alphaword at loc 11(1,12)
-            Exception raised:Expected alphaword (at char 12), (line:1, col:13)
-            Match alphaword at loc 15(1,16)
-            Exception raised:Expected alphaword (at char 15), (line:1, col:16)
-
-        The output shown is that produced by the default debug actions - custom debug actions can be
-        specified using :class:`set_debug_actions`. Prior to attempting
-        to match the ``wd`` expression, the debugging message ``"Match <exprname> at loc <n>(<line>,<col>)"``
-        is shown. Then, if the parse succeeds, a ``"Matched"`` message is shown; if it fails, an
-        ``"Exception raised"`` message is shown. Also note the use of :class:`set_name` to assign a human-readable name to the expression,
-        which makes debugging and exception messages easier to understand - for instance, the default
-        name created for the :class:`Word` expression without calling ``set_name`` is ``"W:(A-Za-z)"``.
-        """
-        if flag:
-            self.set_debug_actions(
-                _default_start_debug_action,
-                _default_success_debug_action,
-                _default_exception_debug_action,
-            )
-        else:
-            self.debug = False
-        return self
-
-    @property
-    def default_name(self) -> str:
-        if self._defaultName is None:
-            self._defaultName = self._generateDefaultName()
-        return self._defaultName
-
-    @abstractmethod
-    def _generateDefaultName(self):
-        """
-        Child classes must define this method, which defines how the ``default_name`` is set.
-        """
-
-    def set_name(self, name: str) -> "ParserElement":
-        """
-        Define a name for this expression, to make debugging and exception messages clearer.
-        Example::
-            Word(nums).parse_string("ABC")  # -> Exception: Expected W:(0-9) (at char 0), (line:1, col:1)
-            Word(nums).set_name("integer").parse_string("ABC")  # -> Exception: Expected integer (at char 0), (line:1, col:1)
-        """
-        self.customName = name
-        self.errmsg = "Expected " + self.name
-        if __diag__.enable_debug_on_named_expressions:
-            self.set_debug()
-        return self
-
-    @property
-    def name(self) -> str:
-        # This will use a user-defined name if available, but otherwise defaults back to the auto-generated name
-        return self.customName if self.customName is not None else self.default_name
-
-    def __str__(self) -> str:
-        return self.name
-
-    def __repr__(self) -> str:
-        return str(self)
-
-    def streamline(self) -> "ParserElement":
-        self.streamlined = True
-        self._defaultName = None
-        return self
-
-    def recurse(self) -> Sequence["ParserElement"]:
-        return []
-
-    def _checkRecursion(self, parseElementList):
-        subRecCheckList = parseElementList[:] + [self]
-        for e in self.recurse():
-            e._checkRecursion(subRecCheckList)
-
-    def validate(self, validateTrace=None) -> None:
-        """
-        Check defined expressions for valid structure, check for infinite recursive definitions.
-        """
-        self._checkRecursion([])
-
-    def parse_file(
-        self,
-        file_or_filename: Union[str, Path, TextIO],
-        encoding: str = "utf-8",
-        parse_all: bool = False,
-        *,
-        parseAll: bool = False,
-    ) -> ParseResults:
-        """
-        Execute the parse expression on the given file or filename.
-        If a filename is specified (instead of a file object),
-        the entire file is opened, read, and closed before parsing.
-        """
-        parseAll = parseAll or parse_all
-        try:
-            file_contents = file_or_filename.read()
-        except AttributeError:
-            with open(file_or_filename, "r", encoding=encoding) as f:
-                file_contents = f.read()
-        try:
-            return self.parse_string(file_contents, parseAll)
-        except ParseBaseException as exc:
-            if ParserElement.verbose_stacktrace:
-                raise
-            else:
-                # catch and re-raise exception from here, clears out pyparsing internal stack trace
-                raise exc.with_traceback(None)
-
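-    # Hedged usage sketch ("config.txt" is a hypothetical filename and ``expr`` any
-    # ParserElement):
-    #
-    #     result = expr.parse_file("config.txt", parse_all=True)   # by filename
-    #     with open("config.txt", encoding="utf-8") as f:
-    #         result = expr.parse_file(f)                          # or a file object
-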
-    def __eq__(self, other):
-        if self is other:
-            return True
-        elif isinstance(other, str_type):
-            return self.matches(other, parse_all=True)
-        elif isinstance(other, ParserElement):
-            return vars(self) == vars(other)
-        return False
-
-    def __hash__(self):
-        return id(self)
-
-    def matches(
-        self, test_string: str, parse_all: bool = True, *, parseAll: bool = True
-    ) -> bool:
-        """
-        Method for quick testing of a parser against a test string. Good for simple
-        inline microtests of sub-expressions while building up a larger parser.
-
-        Parameters:
-        - ``test_string`` - to test against this expression for a match
-        - ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests
-
-        Example::
-
-            expr = Word(nums)
-            assert expr.matches("100")
-        """
-        parseAll = parseAll and parse_all
-        try:
-            self.parse_string(str(test_string), parse_all=parseAll)
-            return True
-        except ParseBaseException:
-            return False
-
-    def run_tests(
-        self,
-        tests: Union[str, List[str]],
-        parse_all: bool = True,
-        comment: OptionalType[Union["ParserElement", str]] = "#",
-        full_dump: bool = True,
-        print_results: bool = True,
-        failure_tests: bool = False,
-        post_parse: OptionalType[Callable[[str, ParseResults], str]] = None,
-        file: OptionalType[TextIO] = None,
-        with_line_numbers: bool = False,
-        *,
-        parseAll: bool = True,
-        fullDump: bool = True,
-        printResults: bool = True,
-        failureTests: bool = False,
-        postParse: OptionalType[Callable[[str, ParseResults], str]] = None,
-    ) -> Tuple[bool, List[Tuple[str, Union[ParseResults, Exception]]]]:
-        """
-        Execute the parse expression on a series of test strings, showing each
-        test, and either the parsed results or where the parse failed. A quick and easy
-        way to run a parse expression against a list of sample strings.
-
-        Parameters:
-        - ``tests`` - a list of separate test strings, or a multiline string of test strings
-        - ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests
-        - ``comment`` - (default= ``'#'``) - expression for indicating embedded comments in the test
-          string; pass None to disable comment filtering
-        - ``full_dump`` - (default= ``True``) - dump results as list followed by results names in nested outline;
-          if False, only dump nested list
-        - ``print_results`` - (default= ``True``) prints test output to stdout
-        - ``failure_tests`` - (default= ``False``) indicates if these tests are expected to fail parsing
-        - ``post_parse`` - (default= ``None``) optional callback for successful parse results; called as
-          `fn(test_string, parse_results)` and returns a string to be added to the test output
-        - ``file`` - (default= ``None``) optional file-like object to which test output will be written;
-          if None, will default to ``sys.stdout``
-        - ``with_line_numbers`` - (default= ``False``) show test strings with line and column numbers
-
-        Returns: a (success, results) tuple, where success indicates that all tests succeeded
-        (or failed if ``failure_tests`` is True), and the results contain a list of lines of each
-        test's output
-
-        Example::
-
-            number_expr = pyparsing_common.number.copy()
-
-            result = number_expr.run_tests('''
-                # unsigned integer
-                100
-                # negative integer
-                -100
-                # float with scientific notation
-                6.02e23
-                # integer with scientific notation
-                1e-12
-                ''')
-            print("Success" if result[0] else "Failed!")
-
-            result = number_expr.run_tests('''
-                # stray character
-                100Z
-                # missing leading digit before '.'
-                -.100
-                # too many '.'
-                3.14.159
-                ''', failure_tests=True)
-            print("Success" if result[0] else "Failed!")
-
-        prints::
-
-            # unsigned integer
-            100
-            [100]
-
-            # negative integer
-            -100
-            [-100]
-
-            # float with scientific notation
-            6.02e23
-            [6.02e+23]
-
-            # integer with scientific notation
-            1e-12
-            [1e-12]
-
-            Success
-
-            # stray character
-            100Z
-               ^
-            FAIL: Expected end of text (at char 3), (line:1, col:4)
-
-            # missing leading digit before '.'
-            -.100
-            ^
-            FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
-
-            # too many '.'
-            3.14.159
-                ^
-            FAIL: Expected end of text (at char 4), (line:1, col:5)
-
-            Success
-
-        Each test string must be on a single line. If you want to test a string that spans multiple
-        lines, create a test like this::
-
-            expr.run_tests(r"this is a test\\n of strings that spans \\n 3 lines")
-
-        (Note that this is a raw string literal, you must include the leading ``'r'``.)
-        """
-        from .testing import pyparsing_test
-
-        parseAll = parseAll and parse_all
-        fullDump = fullDump and full_dump
-        printResults = printResults and print_results
-        failureTests = failureTests or failure_tests
-        postParse = postParse or post_parse
-        if isinstance(tests, str_type):
-            line_strip = type(tests).strip
-            tests = [line_strip(test_line) for test_line in tests.rstrip().splitlines()]
-        if isinstance(comment, str_type):
-            comment = Literal(comment)
-        if file is None:
-            file = sys.stdout
-        print_ = file.write
-
-        result: Union[ParseResults, Exception]
-        allResults = []
-        comments = []
-        success = True
-        NL = Literal(r"\n").add_parse_action(replace_with("\n")).ignore(quoted_string)
-        BOM = "\ufeff"
-        for t in tests:
-            if (comment is not None and comment.matches(t, False)) or (comments and not t):
-                comments.append(
-                    pyparsing_test.with_line_numbers(t) if with_line_numbers else t
-                )
-                continue
-            if not t:
-                continue
-            out = [
-                "\n" + "\n".join(comments) if comments else "",
-                pyparsing_test.with_line_numbers(t) if with_line_numbers else t,
-            ]
-            comments = []
-            try:
-                # convert newline marks to actual newlines, and strip leading BOM if present
-                t = NL.transform_string(t.lstrip(BOM))
-                result = self.parse_string(t, parse_all=parseAll)
-            except ParseBaseException as pe:
-                fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
-                out.append(pe.explain())
-                out.append("FAIL: " + str(pe))
-                if ParserElement.verbose_stacktrace:
-                    out.extend(traceback.format_tb(pe.__traceback__))
-                success = success and failureTests
-                result = pe
-            except Exception as exc:
-                out.append("FAIL-EXCEPTION: {}: {}".format(type(exc).__name__, exc))
-                if ParserElement.verbose_stacktrace:
-                    out.extend(traceback.format_tb(exc.__traceback__))
-                success = success and failureTests
-                result = exc
-            else:
-                success = success and not failureTests
-                if postParse is not None:
-                    try:
-                        pp_value = postParse(t, result)
-                        if pp_value is not None:
-                            if isinstance(pp_value, ParseResults):
-                                out.append(pp_value.dump())
-                            else:
-                                out.append(str(pp_value))
-                        else:
-                            out.append(result.dump())
-                    except Exception as e:
-                        out.append(result.dump(full=fullDump))
-                        out.append(
-                            "{} failed: {}: {}".format(
-                                postParse.__name__, type(e).__name__, e
-                            )
-                        )
-                else:
-                    out.append(result.dump(full=fullDump))
-            out.append("")
-
-            if printResults:
-                print_("\n".join(out))
-
-            allResults.append((t, result))
-
-        return success, allResults
-
-    def create_diagram(
-        self,
-        output_html: Union[TextIO, Path, str],
-        vertical: int = 3,
-        show_results_names: bool = False,
-        show_groups: bool = False,
-        **kwargs,
-    ) -> None:
-        """
-        Create a railroad diagram for the parser.
-
-        Parameters:
-        - output_html (str or file-like object) - output target for generated
-          diagram HTML
-        - vertical (int) - threshold for formatting multiple alternatives vertically
-          instead of horizontally (default=3)
-        - show_results_names - bool flag whether diagram should show annotations for
-          defined results names
-        - show_groups - bool flag whether groups should be highlighted with an unlabeled surrounding box
-        Additional diagram-formatting keyword arguments can also be included;
-        see railroad.Diagram class.
-        """
-
-        try:
-            from .diagram import to_railroad, railroad_to_html
-        except ImportError as ie:
-            raise Exception(
-                "must ``pip install pyparsing[diagrams]`` to generate parser railroad diagrams"
-            ) from ie
-
-        self.streamline()
-
-        railroad = to_railroad(
-            self,
-            vertical=vertical,
-            show_results_names=show_results_names,
-            show_groups=show_groups,
-            diagram_kwargs=kwargs,
-        )
-        if isinstance(output_html, (str, Path)):
-            with open(output_html, "w", encoding="utf-8") as diag_file:
-                diag_file.write(railroad_to_html(railroad))
-        else:
-            # we were passed a file-like object, just write to it
-            output_html.write(railroad_to_html(railroad))
-
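-    # Hedged usage sketch (requires the optional extras noted in the error message
-    # above; "parser.html" is an illustrative output name, ``expr`` any ParserElement):
-    #
-    #     expr.create_diagram("parser.html", vertical=2, show_results_names=True)
-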
-    setDefaultWhitespaceChars = set_default_whitespace_chars
-    inlineLiteralsUsing = inline_literals_using
-    setResultsName = set_results_name
-    setBreak = set_break
-    setParseAction = set_parse_action
-    addParseAction = add_parse_action
-    addCondition = add_condition
-    setFailAction = set_fail_action
-    tryParse = try_parse
-    canParseNext = can_parse_next
-    resetCache = reset_cache
-    enableLeftRecursion = enable_left_recursion
-    enablePackrat = enable_packrat
-    parseString = parse_string
-    scanString = scan_string
-    searchString = search_string
-    transformString = transform_string
-    setWhitespaceChars = set_whitespace_chars
-    parseWithTabs = parse_with_tabs
-    setDebugActions = set_debug_actions
-    setDebug = set_debug
-    defaultName = default_name
-    setName = set_name
-    parseFile = parse_file
-    runTests = run_tests
-    ignoreWhitespace = ignore_whitespace
-    leaveWhitespace = leave_whitespace
-
-
-class _PendingSkip(ParserElement):
-    # internal placeholder class to hold a place where '...' is added to a parser element;
-    # once another ParserElement is added, this placeholder will be replaced with a SkipTo
-    def __init__(self, expr: ParserElement, must_skip: bool = False):
-        super().__init__()
-        self.anchor = expr
-        self.must_skip = must_skip
-
-    def _generateDefaultName(self):
-        return str(self.anchor + Empty()).replace("Empty", "...")
-
-    def __add__(self, other) -> "ParserElement":
-        skipper = SkipTo(other).set_name("...")("_skipped*")
-        if self.must_skip:
-
-            def must_skip(t):
-                if not t._skipped or t._skipped.as_list() == [""]:
-                    del t[0]
-                    t.pop("_skipped", None)
-
-            def show_skip(t):
-                if t._skipped.as_list()[-1:] == [""]:
-                    t.pop("_skipped")
-                    t["_skipped"] = "missing <" + repr(self.anchor) + ">"
-
-            return (
-                self.anchor + skipper().add_parse_action(must_skip)
-                | skipper().add_parse_action(show_skip)
-            ) + other
-
-        return self.anchor + skipper + other
-
-    def __repr__(self):
-        return self.defaultName
-
-    def parseImpl(self, *args):
-        raise Exception(
-            "use of `...` expression without following SkipTo target expression"
-        )
-
-
-class Token(ParserElement):
-    """Abstract :class:`ParserElement` subclass, for defining atomic
-    matching patterns.
-    """
-
-    def __init__(self):
-        super().__init__(savelist=False)
-
-    def _generateDefaultName(self):
-        return type(self).__name__
-
-
-class Empty(Token):
-    """
-    An empty token, will always match.
-    """
-
-    def __init__(self):
-        super().__init__()
-        self.mayReturnEmpty = True
-        self.mayIndexError = False
-
-
-class NoMatch(Token):
-    """
-    A token that will never match.
-    """
-
-    def __init__(self):
-        super().__init__()
-        self.mayReturnEmpty = True
-        self.mayIndexError = False
-        self.errmsg = "Unmatchable token"
-
-    def parseImpl(self, instring, loc, doActions=True):
-        raise ParseException(instring, loc, self.errmsg, self)
-
-
-class Literal(Token):
-    """
-    Token to exactly match a specified string.
-
-    Example::
-
-        Literal('blah').parse_string('blah')  # -> ['blah']
-        Literal('blah').parse_string('blahfooblah')  # -> ['blah']
-        Literal('blah').parse_string('bla')  # -> Exception: Expected "blah"
-
-    For case-insensitive matching, use :class:`CaselessLiteral`.
-
-    For keyword matching (force word break before and after the matched string),
-    use :class:`Keyword` or :class:`CaselessKeyword`.
-    """
-
-    def __init__(self, match_string: str = "", *, matchString: str = ""):
-        super().__init__()
-        match_string = matchString or match_string
-        self.match = match_string
-        self.matchLen = len(match_string)
-        try:
-            self.firstMatchChar = match_string[0]
-        except IndexError:
-            raise ValueError("null string passed to Literal; use Empty() instead")
-        self.errmsg = "Expected " + self.name
-        self.mayReturnEmpty = False
-        self.mayIndexError = False
-
-        # Performance tuning: modify __class__ to select
-        # a parseImpl optimized for single-character check
-        if self.matchLen == 1 and type(self) is Literal:
-            self.__class__ = _SingleCharLiteral
-
-    def _generateDefaultName(self):
-        return repr(self.match)
-
-    def parseImpl(self, instring, loc, doActions=True):
-        if instring[loc] == self.firstMatchChar and instring.startswith(
-            self.match, loc
-        ):
-            return loc + self.matchLen, self.match
-        raise ParseException(instring, loc, self.errmsg, self)
-
-
-class _SingleCharLiteral(Literal):
-    def parseImpl(self, instring, loc, doActions=True):
-        if instring[loc] == self.firstMatchChar:
-            return loc + 1, self.match
-        raise ParseException(instring, loc, self.errmsg, self)
-
-
-ParserElement._literalStringClass = Literal
-
-
-class Keyword(Token):
-    """
-    Token to exactly match a specified string as a keyword, that is,
-    it must be immediately followed by a non-keyword character.  Compare
-    with :class:`Literal`:
-
-    - ``Literal("if")`` will match the leading ``'if'`` in
-      ``'ifAndOnlyIf'``.
-    - ``Keyword("if")`` will not; it will only match the leading
-      ``'if'`` in ``'if x=1'``, or ``'if(y==2)'``
-
-    Accepts two optional constructor arguments in addition to the
-    keyword string:
-
-    - ``identChars`` is a string of characters that would be valid
-      identifier characters, defaulting to all alphanumerics + "_" and
-      "$"
-    - ``caseless`` allows case-insensitive matching, default is ``False``.
-
-    Example::
-
-        Keyword("start").parse_string("start")  # -> ['start']
-        Keyword("start").parse_string("starting")  # -> Exception
-
-    For case-insensitive matching, use :class:`CaselessKeyword`.
-    """
-
-    DEFAULT_KEYWORD_CHARS = alphanums + "_$"
-
-    def __init__(
-        self,
-        match_string: str = "",
-        ident_chars: OptionalType[str] = None,
-        caseless: bool = False,
-        *,
-        matchString: str = "",
-        identChars: OptionalType[str] = None,
-    ):
-        super().__init__()
-        identChars = identChars or ident_chars
-        if identChars is None:
-            identChars = Keyword.DEFAULT_KEYWORD_CHARS
-        match_string = matchString or match_string
-        self.match = match_string
-        self.matchLen = len(match_string)
-        try:
-            self.firstMatchChar = match_string[0]
-        except IndexError:
-            raise ValueError("null string passed to Keyword; use Empty() instead")
-        self.errmsg = "Expected {} {}".format(type(self).__name__, self.name)
-        self.mayReturnEmpty = False
-        self.mayIndexError = False
-        self.caseless = caseless
-        if caseless:
-            self.caselessmatch = match_string.upper()
-            identChars = identChars.upper()
-        self.identChars = set(identChars)
-
-    def _generateDefaultName(self):
-        return repr(self.match)
-
-    def parseImpl(self, instring, loc, doActions=True):
-        errmsg = self.errmsg
-        errloc = loc
-        if self.caseless:
-            if instring[loc : loc + self.matchLen].upper() == self.caselessmatch:
-                if loc == 0 or instring[loc - 1].upper() not in self.identChars:
-                    if (
-                        loc >= len(instring) - self.matchLen
-                        or instring[loc + self.matchLen].upper() not in self.identChars
-                    ):
-                        return loc + self.matchLen, self.match
-                    else:
-                        # followed by keyword char
-                        errmsg += ", was immediately followed by keyword character"
-                        errloc = loc + self.matchLen
-                else:
-                    # preceded by keyword char
-                    errmsg += ", keyword was immediately preceded by keyword character"
-                    errloc = loc - 1
-            # else no match just raise plain exception
-
-        else:
-            if (
-                instring[loc] == self.firstMatchChar
-                and self.matchLen == 1
-                or instring.startswith(self.match, loc)
-            ):
-                if loc == 0 or instring[loc - 1] not in self.identChars:
-                    if (
-                        loc >= len(instring) - self.matchLen
-                        or instring[loc + self.matchLen] not in self.identChars
-                    ):
-                        return loc + self.matchLen, self.match
-                    else:
-                        # followed by keyword char
-                        errmsg += (
-                            ", keyword was immediately followed by keyword character"
-                        )
-                        errloc = loc + self.matchLen
-                else:
-                    # preceded by keyword char
-                    errmsg += ", keyword was immediately preceded by keyword character"
-                    errloc = loc - 1
-            # else no match just raise plain exception
-
-        raise ParseException(instring, errloc, errmsg, self)
-
-    @staticmethod
-    def set_default_keyword_chars(chars) -> None:
-        """
-        Overrides the default characters used by :class:`Keyword` expressions.
-        """
-        Keyword.DEFAULT_KEYWORD_CHARS = chars
-
-    setDefaultKeywordChars = set_default_keyword_chars
-
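-# Illustrative sketch of ``set_default_keyword_chars``; this changes the default for
-# all subsequently created Keyword expressions (behavior reasoned from the code above):
-#
-#     import pyparsing as pp
-#     pp.Keyword.set_default_keyword_chars(pp.alphanums + "_")
-#     pp.Keyword("if").parse_string("if$x")   # matches: '$' is no longer a keyword char
-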
-
-class CaselessLiteral(Literal):
-    """
-    Token to match a specified string, ignoring case of letters.
-    Note: the matched results will always be in the case of the given
-    match string, NOT the case of the input text.
-
-    Example::
-
-        OneOrMore(CaselessLiteral("CMD")).parse_string("cmd CMD Cmd10")
-        # -> ['CMD', 'CMD', 'CMD']
-
-    (Contrast with example for :class:`CaselessKeyword`.)
-    """
-
-    def __init__(self, match_string: str = "", *, matchString: str = ""):
-        match_string = matchString or match_string
-        super().__init__(match_string.upper())
-        # Preserve the defining literal.
-        self.returnString = match_string
-        self.errmsg = "Expected " + self.name
-
-    def parseImpl(self, instring, loc, doActions=True):
-        if instring[loc : loc + self.matchLen].upper() == self.match:
-            return loc + self.matchLen, self.returnString
-        raise ParseException(instring, loc, self.errmsg, self)
-
-
-class CaselessKeyword(Keyword):
-    """
-    Caseless version of :class:`Keyword`.
-
-    Example::
-
-        OneOrMore(CaselessKeyword("CMD")).parse_string("cmd CMD Cmd10")
-        # -> ['CMD', 'CMD']
-
-    (Contrast with example for :class:`CaselessLiteral`.)
-    """
-
-    def __init__(
-        self,
-        match_string: str = "",
-        ident_chars: OptionalType[str] = None,
-        *,
-        matchString: str = "",
-        identChars: OptionalType[str] = None,
-    ):
-        identChars = identChars or ident_chars
-        match_string = matchString or match_string
-        super().__init__(match_string, identChars, caseless=True)
-
-
-class CloseMatch(Token):
-    """A variation on :class:`Literal` which matches "close" matches,
-    that is, strings with at most 'n' mismatching characters.
-    :class:`CloseMatch` takes parameters:
-
-    - ``match_string`` - string to be matched
-    - ``caseless`` - a boolean indicating whether to ignore casing when comparing characters
-    - ``max_mismatches`` - (``default=1``) maximum number of
-      mismatches allowed to count as a match
-
-    The results from a successful parse will contain the matched text
-    from the input string and the following named results:
-
-    - ``mismatches`` - a list of the positions within the
-      match_string where mismatches were found
-    - ``original`` - the original match_string used to compare
-      against the input string
-
-    If ``mismatches`` is an empty list, then the match was an exact
-    match.
-
-    Example::
-
-        patt = CloseMatch("ATCATCGAATGGA")
-        patt.parse_string("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
-        patt.parse_string("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
-
-        # exact match
-        patt.parse_string("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
-
-        # close match allowing up to 2 mismatches
-        patt = CloseMatch("ATCATCGAATGGA", max_mismatches=2)
-        patt.parse_string("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
-    """
-
-    def __init__(
-        self,
-        match_string: str,
-        max_mismatches: OptionalType[int] = None,
-        *,
-        maxMismatches: int = 1,
-        caseless=False,
-    ):
-        maxMismatches = max_mismatches if max_mismatches is not None else maxMismatches
-        super().__init__()
-        self.match_string = match_string
-        self.maxMismatches = maxMismatches
-        self.errmsg = "Expected {!r} (with up to {} mismatches)".format(
-            self.match_string, self.maxMismatches
-        )
-        self.caseless = caseless
-        self.mayIndexError = False
-        self.mayReturnEmpty = False
-
-    def _generateDefaultName(self):
-        return "{}:{!r}".format(type(self).__name__, self.match_string)
-
-    def parseImpl(self, instring, loc, doActions=True):
-        start = loc
-        instrlen = len(instring)
-        maxloc = start + len(self.match_string)
-
-        if maxloc <= instrlen:
-            match_string = self.match_string
-            match_stringloc = 0
-            mismatches = []
-            maxMismatches = self.maxMismatches
-
-            for match_stringloc, s_m in enumerate(
-                zip(instring[loc:maxloc], match_string)
-            ):
-                src, mat = s_m
-                if self.caseless:
-                    src, mat = src.lower(), mat.lower()
-
-                if src != mat:
-                    mismatches.append(match_stringloc)
-                    if len(mismatches) > maxMismatches:
-                        break
-            else:
-                loc = start + match_stringloc + 1
-                results = ParseResults([instring[start:loc]])
-                results["original"] = match_string
-                results["mismatches"] = mismatches
-                return loc, results
-
-        raise ParseException(instring, loc, self.errmsg, self)
-
-
-class Word(Token):
-    """Token for matching words composed of allowed character sets.
-    Parameters:
-    - ``init_chars`` - string of all characters that should be used to
-      match as a word; "ABC" will match "AAA", "ABAB", "CBAC", etc.;
-      if ``body_chars`` is also specified, then this is the string of
-      initial characters
-    - ``body_chars`` - string of characters that
-      can be used for matching after a matched initial character as
-      given in ``init_chars``; if omitted, same as the initial characters
-      (default=``None``)
-    - ``min`` - minimum number of characters to match (default=1)
-    - ``max`` - maximum number of characters to match (default=0)
-    - ``exact`` - exact number of characters to match (default=0)
-    - ``as_keyword`` - match as a keyword (default=``False``)
-    - ``exclude_chars`` - characters that might be
-      found in the input ``body_chars`` string but which should not be
-      accepted for matching; useful to define a word of all
-      printables except for one or two characters, for instance
-      (default=``None``)
-
-    :class:`srange` is useful for defining custom character set strings
-    to use in :class:`Word` expressions, using range notation from
-    regular expression character sets.
-
-    A common mistake is to use :class:`Word` to match a specific literal
-    string, as in ``Word("Address")``. Remember that :class:`Word`
-    uses the string argument to define *sets* of matchable characters.
-    This expression would match "Add", "AAA", "dAred", or any other word
-    made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an
-    exact literal string, use :class:`Literal` or :class:`Keyword`.
-
-    pyparsing includes helper strings for building Words:
-
-    - :class:`alphas`
-    - :class:`nums`
-    - :class:`alphanums`
-    - :class:`hexnums`
-    - :class:`alphas8bit` (alphabetic characters in ASCII range 128-255
-      - accented, tilded, umlauted, etc.)
-    - :class:`punc8bit` (non-alphabetic characters in ASCII range
-      128-255 - currency, symbols, superscripts, diacriticals, etc.)
-    - :class:`printables` (any non-whitespace character)
-
-    ``alphas``, ``nums``, and ``printables`` are also defined in several
-    Unicode sets - see :class:`pyparsing_unicode`.
-
-    Example::
-
-        # a word composed of digits
-        integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
-
-        # a word with a leading capital, and zero or more lowercase
-        capital_word = Word(alphas.upper(), alphas.lower())
-
-        # hostnames are alphanumeric, with leading alpha, and '-'
-        hostname = Word(alphas, alphanums + '-')
-
-        # roman numeral (not a strict parser, accepts invalid mix of characters)
-        roman = Word("IVXLCDM")
-
-        # any string of non-whitespace characters, except for ','
-        csv_value = Word(printables, exclude_chars=",")
-    """
-
-    def __init__(
-        self,
-        init_chars: str = "",
-        body_chars: OptionalType[str] = None,
-        min: int = 1,
-        max: int = 0,
-        exact: int = 0,
-        as_keyword: bool = False,
-        exclude_chars: OptionalType[str] = None,
-        *,
-        initChars: OptionalType[str] = None,
-        bodyChars: OptionalType[str] = None,
-        asKeyword: bool = False,
-        excludeChars: OptionalType[str] = None,
-    ):
-        initChars = initChars or init_chars
-        bodyChars = bodyChars or body_chars
-        asKeyword = asKeyword or as_keyword
-        excludeChars = excludeChars or exclude_chars
-        super().__init__()
-        if not initChars:
-            raise ValueError(
-                "invalid {}, initChars cannot be empty string".format(
-                    type(self).__name__
-                )
-            )
-
-        initChars = set(initChars)
-        self.initChars = initChars
-        if excludeChars:
-            excludeChars = set(excludeChars)
-            initChars -= excludeChars
-            if bodyChars:
-                bodyChars = set(bodyChars) - excludeChars
-        self.initCharsOrig = "".join(sorted(initChars))
-
-        if bodyChars:
-            self.bodyCharsOrig = "".join(sorted(bodyChars))
-            self.bodyChars = set(bodyChars)
-        else:
-            self.bodyCharsOrig = "".join(sorted(initChars))
-            self.bodyChars = set(initChars)
-
-        self.maxSpecified = max > 0
-
-        if min < 1:
-            raise ValueError(
-                "cannot specify a minimum length < 1; use Opt(Word()) if zero-length word is permitted"
-            )
-
-        self.minLen = min
-
-        if max > 0:
-            self.maxLen = max
-        else:
-            self.maxLen = _MAX_INT
-
-        if exact > 0:
-            self.maxLen = exact
-            self.minLen = exact
-
-        self.errmsg = "Expected " + self.name
-        self.mayIndexError = False
-        self.asKeyword = asKeyword
-
-        # see if we can make a regex for this Word
-        if " " not in self.initChars | self.bodyChars and (min == 1 and exact == 0):
-            if self.bodyChars == self.initChars:
-                if max == 0:
-                    repeat = "+"
-                elif max == 1:
-                    repeat = ""
-                else:
-                    repeat = "{{{},{}}}".format(
-                        self.minLen, "" if self.maxLen == _MAX_INT else self.maxLen
-                    )
-                self.reString = "[{}]{}".format(
-                    _collapse_string_to_ranges(self.initChars),
-                    repeat,
-                )
-            elif len(self.initChars) == 1:
-                if max == 0:
-                    repeat = "*"
-                else:
-                    repeat = "{{0,{}}}".format(max - 1)
-                self.reString = "{}[{}]{}".format(
-                    re.escape(self.initCharsOrig),
-                    _collapse_string_to_ranges(self.bodyChars),
-                    repeat,
-                )
-            else:
-                if max == 0:
-                    repeat = "*"
-                elif max == 2:
-                    repeat = ""
-                else:
-                    repeat = "{{0,{}}}".format(max - 1)
-                self.reString = "[{}][{}]{}".format(
-                    _collapse_string_to_ranges(self.initChars),
-                    _collapse_string_to_ranges(self.bodyChars),
-                    repeat,
-                )
-            if self.asKeyword:
-                self.reString = r"\b" + self.reString + r"\b"
-
-            try:
-                self.re = re.compile(self.reString)
-            except re.error:
-                self.re = None
-            else:
-                self.re_match = self.re.match
-                self.__class__ = _WordRegex
-
-    def _generateDefaultName(self):
-        def charsAsStr(s):
-            max_repr_len = 16
-            s = _collapse_string_to_ranges(s, re_escape=False)
-            if len(s) > max_repr_len:
-                return s[: max_repr_len - 3] + "..."
-            else:
-                return s
-
-        if self.initChars != self.bodyChars:
-            base = "W:({}, {})".format(
-                charsAsStr(self.initChars), charsAsStr(self.bodyChars)
-            )
-        else:
-            base = "W:({})".format(charsAsStr(self.initChars))
-
-        # add length specification
-        if self.minLen > 1 or self.maxLen != _MAX_INT:
-            if self.minLen == self.maxLen:
-                if self.minLen == 1:
-                    return base[2:]
-                else:
-                    return base + "{{{}}}".format(self.minLen)
-            elif self.maxLen == _MAX_INT:
-                return base + "{{{},...}}".format(self.minLen)
-            else:
-                return base + "{{{},{}}}".format(self.minLen, self.maxLen)
-        return base
-
-    def parseImpl(self, instring, loc, doActions=True):
-        if instring[loc] not in self.initChars:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        start = loc
-        loc += 1
-        instrlen = len(instring)
-        bodychars = self.bodyChars
-        maxloc = start + self.maxLen
-        maxloc = min(maxloc, instrlen)
-        while loc < maxloc and instring[loc] in bodychars:
-            loc += 1
-
-        throwException = False
-        if loc - start < self.minLen:
-            throwException = True
-        elif self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
-            throwException = True
-        elif self.asKeyword:
-            if (
-                start > 0
-                and instring[start - 1] in bodychars
-                or loc < instrlen
-                and instring[loc] in bodychars
-            ):
-                throwException = True
-
-        if throwException:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        return loc, instring[start:loc]
-
-
-class _WordRegex(Word):
-    def parseImpl(self, instring, loc, doActions=True):
-        result = self.re_match(instring, loc)
-        if not result:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        loc = result.end()
-        return loc, result.group()
-
-
-class Char(_WordRegex):
-    """A short-cut class for defining :class:`Word` ``(characters, exact=1)``,
-    when defining a match of any single character in a string of
-    characters.
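-
-    Example (an illustrative sketch; ``nums`` is pyparsing's helper string
-    of digit characters)::
-
-        single_digit = Char(nums)       # same as Word(nums, exact=1)
-        single_digit.parse_string("7")  # -> ['7']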
-    """
-
-    def __init__(
-        self,
-        charset: str,
-        as_keyword: bool = False,
-        exclude_chars: OptionalType[str] = None,
-        *,
-        asKeyword: bool = False,
-        excludeChars: OptionalType[str] = None,
-    ):
-        asKeyword = asKeyword or as_keyword
-        excludeChars = excludeChars or exclude_chars
-        super().__init__(
-            charset, exact=1, asKeyword=asKeyword, excludeChars=excludeChars
-        )
-        self.reString = "[{}]".format(_collapse_string_to_ranges(self.initChars))
-        if asKeyword:
-            self.reString = r"\b{}\b".format(self.reString)
-        self.re = re.compile(self.reString)
-        self.re_match = self.re.match
-
-
-class Regex(Token):
-    r"""Token for matching strings that match a given regular
-    expression. Defined with a string specifying the regular expression in
-    a form recognized by the stdlib Python `re module <https://docs.python.org/3/library/re.html>`_.
-    If the given regex contains named groups (defined using ``(?P<name>...)``),
-    these will be preserved as named :class:`ParseResults`.
-
-    If instead of the Python stdlib ``re`` module you wish to use a different RE module
-    (such as the ``regex`` module), you can do so by building your ``Regex`` object
-    with an RE object compiled using that module.
-
-    Example::
-
-        realnum = Regex(r"[+-]?\d+\.\d*")
-        # ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
-        roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
-
-        # named fields in a regex will be returned as named results
-        date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
-
-        # the Regex class will accept re's compiled using the regex module
-        import regex
-        parser = pp.Regex(regex.compile(r'[0-9]'))
-    """
-
-    def __init__(
-        self,
-        pattern: Any,
-        flags: Union[re.RegexFlag, int] = 0,
-        as_group_list: bool = False,
-        as_match: bool = False,
-        *,
-        asGroupList: bool = False,
-        asMatch: bool = False,
-    ):
-        """The parameters ``pattern`` and ``flags`` are passed
-        to the ``re.compile()`` function as-is. See the Python
-        `re module <https://docs.python.org/3/library/re.html>`_ documentation for an
-        explanation of the acceptable patterns and flags.
-        """
-        super().__init__()
-        asGroupList = asGroupList or as_group_list
-        asMatch = asMatch or as_match
-
-        if isinstance(pattern, str_type):
-            if not pattern:
-                raise ValueError("null string passed to Regex; use Empty() instead")
-
-            self._re = None
-            self.reString = self.pattern = pattern
-            self.flags = flags
-
-        elif hasattr(pattern, "pattern") and hasattr(pattern, "match"):
-            self._re = pattern
-            self.pattern = self.reString = pattern.pattern
-            self.flags = flags
-
-        else:
-            raise TypeError(
-                "Regex may only be constructed with a string or a compiled RE object"
-            )
-
-        self.errmsg = "Expected " + self.name
-        self.mayIndexError = False
-        self.asGroupList = asGroupList
-        self.asMatch = asMatch
-        if self.asGroupList:
-            self.parseImpl = self.parseImplAsGroupList
-        if self.asMatch:
-            self.parseImpl = self.parseImplAsMatch
-
-    @cached_property
-    def re(self):
-        if self._re:
-            return self._re
-        else:
-            try:
-                return re.compile(self.pattern, self.flags)
-            except re.error:
-                raise ValueError(
-                    "invalid pattern ({!r}) passed to Regex".format(self.pattern)
-                )
-
-    @cached_property
-    def re_match(self):
-        return self.re.match
-
-    @cached_property
-    def mayReturnEmpty(self):
-        return self.re_match("") is not None
-
-    def _generateDefaultName(self):
-        return "Re:({})".format(repr(self.pattern).replace("\\\\", "\\"))
-
-    def parseImpl(self, instring, loc, doActions=True):
-        result = self.re_match(instring, loc)
-        if not result:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        loc = result.end()
-        ret = ParseResults(result.group())
-        d = result.groupdict()
-        if d:
-            for k, v in d.items():
-                ret[k] = v
-        return loc, ret
-
-    def parseImplAsGroupList(self, instring, loc, doActions=True):
-        result = self.re_match(instring, loc)
-        if not result:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        loc = result.end()
-        ret = result.groups()
-        return loc, ret
-
-    def parseImplAsMatch(self, instring, loc, doActions=True):
-        result = self.re_match(instring, loc)
-        if not result:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        loc = result.end()
-        ret = result
-        return loc, ret
-
-    def sub(self, repl: str) -> ParserElement:
-        r"""
-        Return :class:`Regex` with an attached parse action to transform the parsed
-        result as if called using `re.sub(expr, repl, string) <https://docs.python.org/3/library/re.html#re.sub>`_.
-
-        Example::
-
-            make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2</\1>")
-            print(make_html.transform_string("h1:main title:"))
-            # prints "<h1>main title</h1>"
-        """
-        if self.asGroupList:
-            raise TypeError("cannot use sub() with Regex(asGroupList=True)")
-
-        if self.asMatch and callable(repl):
-            raise TypeError("cannot use sub() with a callable with Regex(asMatch=True)")
-
-        if self.asMatch:
-
-            def pa(tokens):
-                return tokens[0].expand(repl)
-
-        else:
-
-            def pa(tokens):
-                return self.re.sub(repl, tokens[0])
-
-        return self.add_parse_action(pa)
-
-
-class QuotedString(Token):
-    r"""
-    Token for matching strings that are delimited by quoting characters.
-
-    Defined with the following parameters:
-
-    - ``quote_char`` - string of one or more characters defining the
-      quote delimiting string
-    - ``esc_char`` - character to escape quotes, typically backslash
-      (default= ``None``)
-    - ``esc_quote`` - special quote sequence to escape an embedded quote
-      string (such as SQL's ``""`` to escape an embedded ``"``)
-      (default= ``None``)
-    - ``multiline`` - boolean indicating whether quotes can span
-      multiple lines (default= ``False``)
-    - ``unquote_results`` - boolean indicating whether the matched text
-      should be unquoted (default= ``True``)
-    - ``end_quote_char`` - string of one or more characters defining the
-      end of the quote delimited string (default= ``None``  => same as
-      quote_char)
-    - ``convert_whitespace_escapes`` - convert escaped whitespace
-      (``'\t'``, ``'\n'``, etc.) to actual whitespace
-      (default= ``True``)
-
-    Example::
-
-        qs = QuotedString('"')
-        print(qs.search_string('lsjdf "This is the quote" sldjf'))
-        complex_qs = QuotedString('{{', end_quote_char='}}')
-        print(complex_qs.search_string('lsjdf {{This is the "quote"}} sldjf'))
-        sql_qs = QuotedString('"', esc_quote='""')
-        print(sql_qs.search_string('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
-
-    prints::
-
-        [['This is the quote']]
-        [['This is the "quote"']]
-        [['This is the quote with "embedded" quotes']]
-    """
-    ws_map = ((r"\t", "\t"), (r"\n", "\n"), (r"\f", "\f"), (r"\r", "\r"))
-
-    def __init__(
-        self,
-        quote_char: str = "",
-        esc_char: OptionalType[str] = None,
-        esc_quote: OptionalType[str] = None,
-        multiline: bool = False,
-        unquote_results: bool = True,
-        end_quote_char: OptionalType[str] = None,
-        convert_whitespace_escapes: bool = True,
-        *,
-        quoteChar: str = "",
-        escChar: OptionalType[str] = None,
-        escQuote: OptionalType[str] = None,
-        unquoteResults: bool = True,
-        endQuoteChar: OptionalType[str] = None,
-        convertWhitespaceEscapes: bool = True,
-    ):
-        super().__init__()
-        escChar = escChar or esc_char
-        escQuote = escQuote or esc_quote
-        unquoteResults = unquoteResults and unquote_results
-        endQuoteChar = endQuoteChar or end_quote_char
-        convertWhitespaceEscapes = (
-            convertWhitespaceEscapes and convert_whitespace_escapes
-        )
-        quote_char = quoteChar or quote_char
-
-        # remove white space from quote chars - won't work anyway
-        quote_char = quote_char.strip()
-        if not quote_char:
-            raise ValueError("quote_char cannot be the empty string")
-
-        if endQuoteChar is None:
-            endQuoteChar = quote_char
-        else:
-            endQuoteChar = endQuoteChar.strip()
-            if not endQuoteChar:
-                raise ValueError("endQuoteChar cannot be the empty string")
-
-        self.quoteChar = quote_char
-        self.quoteCharLen = len(quote_char)
-        self.firstQuoteChar = quote_char[0]
-        self.endQuoteChar = endQuoteChar
-        self.endQuoteCharLen = len(endQuoteChar)
-        self.escChar = escChar
-        self.escQuote = escQuote
-        self.unquoteResults = unquoteResults
-        self.convertWhitespaceEscapes = convertWhitespaceEscapes
-
-        sep = ""
-        inner_pattern = ""
-
-        if escQuote:
-            inner_pattern += r"{}(?:{})".format(sep, re.escape(escQuote))
-            sep = "|"
-
-        if escChar:
-            inner_pattern += r"{}(?:{}.)".format(sep, re.escape(escChar))
-            sep = "|"
-            self.escCharReplacePattern = re.escape(self.escChar) + "(.)"
-
-        if len(self.endQuoteChar) > 1:
-            inner_pattern += (
-                "{}(?:".format(sep)
-                + "|".join(
-                    "(?:{}(?!{}))".format(
-                        re.escape(self.endQuoteChar[:i]),
-                        re.escape(self.endQuoteChar[i:]),
-                    )
-                    for i in range(len(self.endQuoteChar) - 1, 0, -1)
-                )
-                + ")"
-            )
-            sep = "|"
-
-        if multiline:
-            self.flags = re.MULTILINE | re.DOTALL
-            inner_pattern += r"{}(?:[^{}{}])".format(
-                sep,
-                _escape_regex_range_chars(self.endQuoteChar[0]),
-                (_escape_regex_range_chars(escChar) if escChar is not None else ""),
-            )
-        else:
-            self.flags = 0
-            inner_pattern += r"{}(?:[^{}\n\r{}])".format(
-                sep,
-                _escape_regex_range_chars(self.endQuoteChar[0]),
-                (_escape_regex_range_chars(escChar) if escChar is not None else ""),
-            )
-
-        self.pattern = "".join(
-            [
-                re.escape(self.quoteChar),
-                "(?:",
-                inner_pattern,
-                ")*",
-                re.escape(self.endQuoteChar),
-            ]
-        )
-
-        try:
-            self.re = re.compile(self.pattern, self.flags)
-            self.reString = self.pattern
-            self.re_match = self.re.match
-        except re.error:
-            raise ValueError(
-                "invalid pattern {!r} passed to Regex".format(self.pattern)
-            )
-
-        self.errmsg = "Expected " + self.name
-        self.mayIndexError = False
-        self.mayReturnEmpty = True
-
-    def _generateDefaultName(self):
-        if self.quoteChar == self.endQuoteChar and isinstance(self.quoteChar, str_type):
-            return "string enclosed in {!r}".format(self.quoteChar)
-
-        return "quoted string, starting with {} ending with {}".format(
-            self.quoteChar, self.endQuoteChar
-        )
-
-    def parseImpl(self, instring, loc, doActions=True):
-        result = (
-            instring[loc] == self.firstQuoteChar
-            and self.re_match(instring, loc)
-            or None
-        )
-        if not result:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        loc = result.end()
-        ret = result.group()
-
-        if self.unquoteResults:
-
-            # strip off quotes
-            ret = ret[self.quoteCharLen : -self.endQuoteCharLen]
-
-            if isinstance(ret, str_type):
-                # replace escaped whitespace
-                if "\\" in ret and self.convertWhitespaceEscapes:
-                    for wslit, wschar in self.ws_map:
-                        ret = ret.replace(wslit, wschar)
-
-                # replace escaped characters
-                if self.escChar:
-                    ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
-
-                # replace escaped quotes
-                if self.escQuote:
-                    ret = ret.replace(self.escQuote, self.endQuoteChar)
-
-        return loc, ret
-
-
-class CharsNotIn(Token):
-    """Token for matching words composed of characters *not* in a given
-    set (will include whitespace in matched characters if not listed in
-    the provided exclusion set - see example). Defined with string
-    containing all disallowed characters, and an optional minimum,
-    maximum, and/or exact length.  The default value for ``min`` is
-    1 (a minimum value < 1 is not valid); the default values for
-    ``max`` and ``exact`` are 0, meaning no maximum or exact
-    length restriction.
-
-    Example::
-
-        # define a comma-separated-value as anything that is not a ','
-        csv_value = CharsNotIn(',')
-        print(delimited_list(csv_value).parse_string("dkls,lsdkjf,s12 34,@!#,213"))
-
-    prints::
-
-        ['dkls', 'lsdkjf', 's12 34', '@!#', '213']
-    """
-
-    def __init__(
-        self,
-        not_chars: str = "",
-        min: int = 1,
-        max: int = 0,
-        exact: int = 0,
-        *,
-        notChars: str = "",
-    ):
-        super().__init__()
-        self.skipWhitespace = False
-        self.notChars = not_chars or notChars
-        self.notCharsSet = set(self.notChars)
-
-        if min < 1:
-            raise ValueError(
-                "cannot specify a minimum length < 1; use "
-                "Opt(CharsNotIn()) if zero-length char group is permitted"
-            )
-
-        self.minLen = min
-
-        if max > 0:
-            self.maxLen = max
-        else:
-            self.maxLen = _MAX_INT
-
-        if exact > 0:
-            self.maxLen = exact
-            self.minLen = exact
-
-        self.errmsg = "Expected " + self.name
-        self.mayReturnEmpty = self.minLen == 0
-        self.mayIndexError = False
-
-    def _generateDefaultName(self):
-        not_chars_str = _collapse_string_to_ranges(self.notChars)
-        if len(not_chars_str) > 16:
-            return "!W:({}...)".format(self.notChars[: 16 - 3])
-        else:
-            return "!W:({})".format(self.notChars)
-
-    def parseImpl(self, instring, loc, doActions=True):
-        notchars = self.notCharsSet
-        if instring[loc] in notchars:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        start = loc
-        loc += 1
-        maxlen = min(start + self.maxLen, len(instring))
-        while loc < maxlen and instring[loc] not in notchars:
-            loc += 1
-
-        if loc - start < self.minLen:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        return loc, instring[start:loc]
-
-
-class White(Token):
-    """Special matching class for matching whitespace.  Normally,
-    whitespace is ignored by pyparsing grammars.  This class is included
-    when some whitespace structures are significant.  Define with
-    a string containing the whitespace characters to be matched; default
-    is ``" \\t\\r\\n"``.  Also takes optional ``min``,
-    ``max``, and ``exact`` arguments, as defined for the
-    :class:`Word` class.
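-
-    Example (an illustrative sketch; by default pyparsing skips whitespace,
-    so ``White`` is only needed when the whitespace itself is significant)::
-
-        # match two words separated by an explicit tab character
-        tsv_pair = Word(alphas) + White("\\t").suppress() + Word(alphas)
-        tsv_pair.parse_string("abc\\tdef")  # -> ['abc', 'def']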
-    """
-
-    whiteStrs = {
-        " ": "<SP>",
-        "\t": "<TAB>",
-        "\n": "<LF>",
-        "\r": "<CR>",
-        "\f": "<FF>",
-        "\u00A0": "<NBSP>",
-        "\u1680": "<OGHAM_SPACE_MARK>",
-        "\u180E": "<MONGOLIAN_VOWEL_SEPARATOR>",
-        "\u2000": "<EN_QUAD>",
-        "\u2001": "<EM_QUAD>",
-        "\u2002": "<EN_SPACE>",
-        "\u2003": "<EM_SPACE>",
-        "\u2004": "<THREE-PER-EM_SPACE>",
-        "\u2005": "<FOUR-PER-EM_SPACE>",
-        "\u2006": "<SIX-PER-EM_SPACE>",
-        "\u2007": "<FIGURE_SPACE>",
-        "\u2008": "<PUNCTUATION_SPACE>",
-        "\u2009": "<THIN_SPACE>",
-        "\u200A": "<HAIR_SPACE>",
-        "\u200B": "<ZERO_WIDTH_SPACE>",
-        "\u202F": "<NNBSP>",
-        "\u205F": "<MMSP>",
-        "\u3000": "<IDEOGRAPHIC_SPACE>",
-    }
-
-    def __init__(self, ws: str = " \t\r\n", min: int = 1, max: int = 0, exact: int = 0):
-        super().__init__()
-        self.matchWhite = ws
-        self.set_whitespace_chars(
-            "".join(c for c in self.whiteStrs if c not in self.matchWhite),
-            copy_defaults=True,
-        )
-        # self.leave_whitespace()
-        self.mayReturnEmpty = True
-        self.errmsg = "Expected " + self.name
-
-        self.minLen = min
-
-        if max > 0:
-            self.maxLen = max
-        else:
-            self.maxLen = _MAX_INT
-
-        if exact > 0:
-            self.maxLen = exact
-            self.minLen = exact
-
-    def _generateDefaultName(self):
-        return "".join(White.whiteStrs[c] for c in self.matchWhite)
-
-    def parseImpl(self, instring, loc, doActions=True):
-        if instring[loc] not in self.matchWhite:
-            raise ParseException(instring, loc, self.errmsg, self)
-        start = loc
-        loc += 1
-        maxloc = start + self.maxLen
-        maxloc = min(maxloc, len(instring))
-        while loc < maxloc and instring[loc] in self.matchWhite:
-            loc += 1
-
-        if loc - start < self.minLen:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        return loc, instring[start:loc]
-
-
-class PositionToken(Token):
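-    """Abstract base class for tokens that match a parsing position
-    within the input text rather than consuming any characters
-    (e.g. :class:`LineStart`, :class:`StringEnd`).
-    """
-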
-    def __init__(self):
-        super().__init__()
-        self.mayReturnEmpty = True
-        self.mayIndexError = False
-
-
-class GoToColumn(PositionToken):
-    """Token to advance to a specific column of input text; useful for
-    tabular report scraping.
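-
-    Example (an illustrative sketch; ``restOfLine`` is pyparsing's built-in
-    rest-of-line expression)::
-
-        # skip ahead to column 10 of the line, then take the rest of it
-        data_field = GoToColumn(10).suppress() + restOfLine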
-    """
-
-    def __init__(self, colno: int):
-        super().__init__()
-        self.col = colno
-
-    def preParse(self, instring, loc):
-        if col(loc, instring) != self.col:
-            instrlen = len(instring)
-            if self.ignoreExprs:
-                loc = self._skipIgnorables(instring, loc)
-            while (
-                loc < instrlen
-                and instring[loc].isspace()
-                and col(loc, instring) != self.col
-            ):
-                loc += 1
-        return loc
-
-    def parseImpl(self, instring, loc, doActions=True):
-        thiscol = col(loc, instring)
-        if thiscol > self.col:
-            raise ParseException(instring, loc, "Text not in expected column", self)
-        newloc = loc + self.col - thiscol
-        ret = instring[loc:newloc]
-        return newloc, ret
-
-
-class LineStart(PositionToken):
-    r"""Matches if current position is at the beginning of a line within
-    the parse string
-
-    Example::
-
-        test = '''\
-        AAA this line
-        AAA and this line
-          AAA but not this one
-        B AAA and definitely not this one
-        '''
-
-        for t in (LineStart() + 'AAA' + restOfLine).search_string(test):
-            print(t)
-
-    prints::
-
-        ['AAA', ' this line']
-        ['AAA', ' and this line']
-
-    """
-
-    def __init__(self):
-        super().__init__()
-        self.leave_whitespace()
-        self.orig_whiteChars = set() | self.whiteChars
-        self.whiteChars.discard("\n")
-        self.skipper = Empty().set_whitespace_chars(self.whiteChars)
-        self.errmsg = "Expected start of line"
-
-    def preParse(self, instring, loc):
-        if loc == 0:
-            return loc
-        else:
-            ret = self.skipper.preParse(instring, loc)
-            if "\n" in self.orig_whiteChars:
-                while instring[ret : ret + 1] == "\n":
-                    ret = self.skipper.preParse(instring, ret + 1)
-            return ret
-
-    def parseImpl(self, instring, loc, doActions=True):
-        if col(loc, instring) == 1:
-            return loc, []
-        raise ParseException(instring, loc, self.errmsg, self)
-
-
-class LineEnd(PositionToken):
-    """Matches if current position is at the end of a line within the
-    parse string
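-
-    Example (an illustrative sketch)::
-
-        # require a line to contain only integers
-        int_line = OneOrMore(Word(nums)) + LineEnd().suppress()
-        int_line.parse_string("1 2 3\\n")  # -> ['1', '2', '3']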
-    """
-
-    def __init__(self):
-        super().__init__()
-        self.whiteChars.discard("\n")
-        self.set_whitespace_chars(self.whiteChars, copy_defaults=False)
-        self.errmsg = "Expected end of line"
-
-    def parseImpl(self, instring, loc, doActions=True):
-        if loc < len(instring):
-            if instring[loc] == "\n":
-                return loc + 1, "\n"
-            else:
-                raise ParseException(instring, loc, self.errmsg, self)
-        elif loc == len(instring):
-            return loc + 1, []
-        else:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-
-class StringStart(PositionToken):
-    """Matches if current position is at the beginning of the parse
-    string
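-
-    Example (an illustrative sketch; most useful with ``search_string``
-    or ``scan_string``, which can otherwise match mid-string)::
-
-        # only match 'key' at the very beginning of the text
-        leading_key = StringStart() + Literal("key")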
-    """
-
-    def __init__(self):
-        super().__init__()
-        self.errmsg = "Expected start of text"
-
-    def parseImpl(self, instring, loc, doActions=True):
-        if loc != 0:
-            # see if entire string up to here is just whitespace and ignoreables
-            if loc != self.preParse(instring, 0):
-                raise ParseException(instring, loc, self.errmsg, self)
-        return loc, []
-
-
-class StringEnd(PositionToken):
-    """
-    Matches if current position is at the end of the parse string
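-
-    Example (an illustrative sketch; ``parse_string(..., parse_all=True)``
-    is the more common way to apply the same check)::
-
-        # fail if anything but trailing whitespace follows the word
-        whole_word = Word(alphas) + StringEnd()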
-    """
-
-    def __init__(self):
-        super().__init__()
-        self.errmsg = "Expected end of text"
-
-    def parseImpl(self, instring, loc, doActions=True):
-        if loc < len(instring):
-            raise ParseException(instring, loc, self.errmsg, self)
-        elif loc == len(instring):
-            return loc + 1, []
-        elif loc > len(instring):
-            return loc, []
-        else:
-            raise ParseException(instring, loc, self.errmsg, self)
-
-
-class WordStart(PositionToken):
-    """Matches if the current position is at the beginning of a
-    :class:`Word`, and is not preceded by any character in a given
-    set of ``word_chars`` (default= ``printables``). To emulate the
-    ``\b`` behavior of regular expressions, use
-    ``WordStart(alphanums)``. ``WordStart`` will also match at
-    the beginning of the string being parsed, or at the beginning of
-    a line.
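-
-    Example (an illustrative sketch)::
-
-        # match 'cat' only at the start of a word: finds the 'cat' in
-        # "cat dog", but not the one embedded in "concat"
-        cat_at_start = WordStart(alphanums) + Literal("cat")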
-    """
-
-    def __init__(self, word_chars: str = printables, *, wordChars: str = printables):
-        wordChars = word_chars if wordChars == printables else wordChars
-        super().__init__()
-        self.wordChars = set(wordChars)
-        self.errmsg = "Not at the start of a word"
-
-    def parseImpl(self, instring, loc, doActions=True):
-        if loc != 0:
-            if (
-                instring[loc - 1] in self.wordChars
-                or instring[loc] not in self.wordChars
-            ):
-                raise ParseException(instring, loc, self.errmsg, self)
-        return loc, []
-
-
-class WordEnd(PositionToken):
-    """Matches if the current position is at the end of a :class:`Word`,
-    and is not followed by any character in a given set of ``word_chars``
-    (default= ``printables``). To emulate the ``\b`` behavior of
-    regular expressions, use ``WordEnd(alphanums)``. ``WordEnd``
-    will also match at the end of the string being parsed, or at the end
-    of a line.
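-
-    Example (an illustrative sketch)::
-
-        # match 'cat' only at the end of a word: finds the 'cat' in
-        # "tomcat", but not the one starting "catalog"
-        cat_at_end = Literal("cat") + WordEnd(alphanums)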
-    """
-
-    def __init__(self, word_chars: str = printables, *, wordChars: str = printables):
-        wordChars = word_chars if wordChars == printables else wordChars
-        super().__init__()
-        self.wordChars = set(wordChars)
-        self.skipWhitespace = False
-        self.errmsg = "Not at the end of a word"
-
-    def parseImpl(self, instring, loc, doActions=True):
-        instrlen = len(instring)
-        if instrlen > 0 and loc < instrlen:
-            if (
-                instring[loc] in self.wordChars
-                or instring[loc - 1] not in self.wordChars
-            ):
-                raise ParseException(instring, loc, self.errmsg, self)
-        return loc, []
-
-
-class ParseExpression(ParserElement):
-    """Abstract subclass of ParserElement, for combining and
-    post-processing parsed tokens.
-    """
-
-    def __init__(self, exprs: IterableType[ParserElement], savelist: bool = False):
-        super().__init__(savelist)
-        self.exprs: List[ParserElement]
-        if isinstance(exprs, _generatorType):
-            exprs = list(exprs)
-
-        if isinstance(exprs, str_type):
-            self.exprs = [self._literalStringClass(exprs)]
-        elif isinstance(exprs, ParserElement):
-            self.exprs = [exprs]
-        elif isinstance(exprs, Iterable):
-            exprs = list(exprs)
-            # if sequence of strings provided, wrap with Literal
-            if any(isinstance(expr, str_type) for expr in exprs):
-                exprs = (
-                    self._literalStringClass(e) if isinstance(e, str_type) else e
-                    for e in exprs
-                )
-            self.exprs = list(exprs)
-        else:
-            try:
-                self.exprs = list(exprs)
-            except TypeError:
-                self.exprs = [exprs]
-        self.callPreparse = False
-
-    def recurse(self) -> Sequence[ParserElement]:
-        return self.exprs[:]
-
-    def append(self, other) -> ParserElement:
-        self.exprs.append(other)
-        self._defaultName = None
-        return self
-
-    def leave_whitespace(self, recursive: bool = True) -> ParserElement:
-        """
-        Extends ``leave_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on
-        all contained expressions.
-        """
-        super().leave_whitespace(recursive)
-
-        if recursive:
-            self.exprs = [e.copy() for e in self.exprs]
-            for e in self.exprs:
-                e.leave_whitespace(recursive)
-        return self
-
-    def ignore_whitespace(self, recursive: bool = True) -> ParserElement:
-        """
-        Extends ``ignore_whitespace`` defined in base class, and also invokes ``ignore_whitespace`` on
-        all contained expressions.
-        """
-        super().ignore_whitespace(recursive)
-        if recursive:
-            self.exprs = [e.copy() for e in self.exprs]
-            for e in self.exprs:
-                e.ignore_whitespace(recursive)
-        return self
-
-    def ignore(self, other) -> ParserElement:
-        if isinstance(other, Suppress):
-            if other not in self.ignoreExprs:
-                super().ignore(other)
-                for e in self.exprs:
-                    e.ignore(self.ignoreExprs[-1])
-        else:
-            super().ignore(other)
-            for e in self.exprs:
-                e.ignore(self.ignoreExprs[-1])
-        return self
-
-    def _generateDefaultName(self):
-        return "{}:({})".format(self.__class__.__name__, str(self.exprs))
-
-    def streamline(self) -> ParserElement:
-        if self.streamlined:
-            return self
-
-        super().streamline()
-
-        for e in self.exprs:
-            e.streamline()
-
-        # collapse nested :class:`And`'s of the form ``And(And(And(a, b), c), d)`` to ``And(a, b, c, d)``
-        # but only if there are no parse actions or resultsNames on the nested And's
-        # (likewise for :class:`Or`'s and :class:`MatchFirst`'s)
-        if len(self.exprs) == 2:
-            other = self.exprs[0]
-            if (
-                isinstance(other, self.__class__)
-                and not other.parseAction
-                and other.resultsName is None
-                and not other.debug
-            ):
-                self.exprs = other.exprs[:] + [self.exprs[1]]
-                self._defaultName = None
-                self.mayReturnEmpty |= other.mayReturnEmpty
-                self.mayIndexError |= other.mayIndexError
-
-            other = self.exprs[-1]
-            if (
-                isinstance(other, self.__class__)
-                and not other.parseAction
-                and other.resultsName is None
-                and not other.debug
-            ):
-                self.exprs = self.exprs[:-1] + other.exprs[:]
-                self._defaultName = None
-                self.mayReturnEmpty |= other.mayReturnEmpty
-                self.mayIndexError |= other.mayIndexError
-
-        self.errmsg = "Expected " + str(self)
-
-        return self
-
-    def validate(self, validateTrace=None) -> None:
-        tmp = (validateTrace if validateTrace is not None else [])[:] + [self]
-        for e in self.exprs:
-            e.validate(tmp)
-        self._checkRecursion([])
-
-    def copy(self) -> ParserElement:
-        ret = super().copy()
-        ret.exprs = [e.copy() for e in self.exprs]
-        return ret
-
-    def _setResultsName(self, name, listAllMatches=False):
-        if (
-            __diag__.warn_ungrouped_named_tokens_in_collection
-            and Diagnostics.warn_ungrouped_named_tokens_in_collection
-            not in self.suppress_warnings_
-        ):
-            for e in self.exprs:
-                if (
-                    isinstance(e, ParserElement)
-                    and e.resultsName
-                    and Diagnostics.warn_ungrouped_named_tokens_in_collection
-                    not in e.suppress_warnings_
-                ):
-                    warnings.warn(
-                        "{}: setting results name {!r} on {} expression "
-                        "collides with {!r} on contained expression".format(
-                            "warn_ungrouped_named_tokens_in_collection",
-                            name,
-                            type(self).__name__,
-                            e.resultsName,
-                        ),
-                        stacklevel=3,
-                    )
-
-        return super()._setResultsName(name, listAllMatches)
-
-    ignoreWhitespace = ignore_whitespace
-    leaveWhitespace = leave_whitespace
-
-
-class And(ParseExpression):
-    """
-    Requires all given :class:`ParseExpression` s to be found in the given order.
-    Expressions may be separated by whitespace.
-    May be constructed using the ``'+'`` operator.
-    May also be constructed using the ``'-'`` operator, which will
-    suppress backtracking.
-
-    Example::
-
-        integer = Word(nums)
-        name_expr = OneOrMore(Word(alphas))
-
-        expr = And([integer("id"), name_expr("name"), integer("age")])
-        # more easily written as:
-        expr = integer("id") + name_expr("name") + integer("age")
-    """
-
-    class _ErrorStop(Empty):
-        def __init__(self, *args, **kwargs):
-            super().__init__(*args, **kwargs)
-            self.leave_whitespace()
-
-        def _generateDefaultName(self):
-            return "-"
-
-    def __init__(self, exprs_arg: IterableType[ParserElement], savelist: bool = True):
-        exprs: List[ParserElement] = list(exprs_arg)
-        if exprs and Ellipsis in exprs:
-            tmp = []
-            for i, expr in enumerate(exprs):
-                if expr is Ellipsis:
-                    if i < len(exprs) - 1:
-                        skipto_arg: ParserElement = (Empty() + exprs[i + 1]).exprs[-1]
-                        tmp.append(SkipTo(skipto_arg)("_skipped*"))
-                    else:
-                        raise Exception(
-                            "cannot construct And with sequence ending in ..."
-                        )
-                else:
-                    tmp.append(expr)
-            exprs[:] = tmp
-        super().__init__(exprs, savelist)
-        if self.exprs:
-            self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
-            if not isinstance(self.exprs[0], White):
-                self.set_whitespace_chars(
-                    self.exprs[0].whiteChars,
-                    copy_defaults=self.exprs[0].copyDefaultWhiteChars,
-                )
-                self.skipWhitespace = self.exprs[0].skipWhitespace
-            else:
-                self.skipWhitespace = False
-        else:
-            self.mayReturnEmpty = True
-        self.callPreparse = True
-
-    def streamline(self) -> ParserElement:
-        # collapse any _PendingSkip's
-        if self.exprs:
-            if any(
-                isinstance(e, ParseExpression)
-                and e.exprs
-                and isinstance(e.exprs[-1], _PendingSkip)
-                for e in self.exprs[:-1]
-            ):
-                for i, e in enumerate(self.exprs[:-1]):
-                    if e is None:
-                        continue
-                    if (
-                        isinstance(e, ParseExpression)
-                        and e.exprs
-                        and isinstance(e.exprs[-1], _PendingSkip)
-                    ):
-                        e.exprs[-1] = e.exprs[-1] + self.exprs[i + 1]
-                        self.exprs[i + 1] = None
-                self.exprs = [e for e in self.exprs if e is not None]
-
-        super().streamline()
-
-        # link any IndentedBlocks to the prior expression
-        for prev, cur in zip(self.exprs, self.exprs[1:]):
-            # traverse cur or any first embedded expr of cur looking for an IndentedBlock
-            # (but watch out for recursive grammar)
-            seen = set()
-            while cur:
-                if id(cur) in seen:
-                    break
-                seen.add(id(cur))
-                if isinstance(cur, IndentedBlock):
-                    prev.add_parse_action(
-                        lambda s, l, t, cur_=cur: setattr(
-                            cur_, "parent_anchor", col(l, s)
-                        )
-                    )
-                    break
-                subs = cur.recurse()
-                cur = next(iter(subs), None)
-
-        self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
-        return self
-
-    def parseImpl(self, instring, loc, doActions=True):
-        # pass False as callPreParse arg to _parse for first element, since we already
-        # pre-parsed the string as part of our And pre-parsing
-        loc, resultlist = self.exprs[0]._parse(
-            instring, loc, doActions, callPreParse=False
-        )
-        errorStop = False
-        for e in self.exprs[1:]:
-            # if isinstance(e, And._ErrorStop):
-            if type(e) is And._ErrorStop:
-                errorStop = True
-                continue
-            if errorStop:
-                try:
-                    loc, exprtokens = e._parse(instring, loc, doActions)
-                except ParseSyntaxException:
-                    raise
-                except ParseBaseException as pe:
-                    pe.__traceback__ = None
-                    raise ParseSyntaxException._from_exception(pe)
-                except IndexError:
-                    raise ParseSyntaxException(
-                        instring, len(instring), self.errmsg, self
-                    )
-            else:
-                loc, exprtokens = e._parse(instring, loc, doActions)
-            if exprtokens or exprtokens.haskeys():
-                resultlist += exprtokens
-        return loc, resultlist
-
-    def __iadd__(self, other):
-        if isinstance(other, str_type):
-            other = self._literalStringClass(other)
-        return self.append(other)  # And([self, other])
-
-    def _checkRecursion(self, parseElementList):
-        subRecCheckList = parseElementList[:] + [self]
-        for e in self.exprs:
-            e._checkRecursion(subRecCheckList)
-            if not e.mayReturnEmpty:
-                break
-
-    def _generateDefaultName(self):
-        inner = " ".join(str(e) for e in self.exprs)
-        # strip off redundant inner {}'s
-        while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}":
-            inner = inner[1:-1]
-        return "{" + inner + "}"
-
-
-class Or(ParseExpression):
-    """Requires that at least one :class:`ParseExpression` is found. If
-    two expressions match, the expression that matches the longest
-    string will be used. May be constructed using the ``'^'``
-    operator.
-
-    Example::
-
-        # construct Or using '^' operator
-
-        number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
-        print(number.search_string("123 3.1416 789"))
-
-    prints::
-
-        [['123'], ['3.1416'], ['789']]
-    """
-
-    def __init__(self, exprs: IterableType[ParserElement], savelist: bool = False):
-        super().__init__(exprs, savelist)
-        if self.exprs:
-            self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
-            self.skipWhitespace = all(e.skipWhitespace for e in self.exprs)
-        else:
-            self.mayReturnEmpty = True
-
-    def streamline(self) -> ParserElement:
-        super().streamline()
-        if self.exprs:
-            self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
-            self.saveAsList = any(e.saveAsList for e in self.exprs)
-            self.skipWhitespace = all(
-                e.skipWhitespace and not isinstance(e, White) for e in self.exprs
-            )
-        else:
-            self.saveAsList = False
-        return self
-
-    def parseImpl(self, instring, loc, doActions=True):
-        maxExcLoc = -1
-        maxException = None
-        matches = []
-        fatals = []
-        if all(e.callPreparse for e in self.exprs):
-            loc = self.preParse(instring, loc)
-        for e in self.exprs:
-            try:
-                loc2 = e.try_parse(instring, loc, raise_fatal=True)
-            except ParseFatalException as pfe:
-                pfe.__traceback__ = None
-                pfe.parserElement = e
-                fatals.append(pfe)
-                maxException = None
-                maxExcLoc = -1
-            except ParseException as err:
-                if not fatals:
-                    err.__traceback__ = None
-                    if err.loc > maxExcLoc:
-                        maxException = err
-                        maxExcLoc = err.loc
-            except IndexError:
-                if len(instring) > maxExcLoc:
-                    maxException = ParseException(
-                        instring, len(instring), e.errmsg, self
-                    )
-                    maxExcLoc = len(instring)
-            else:
-                # save match among all matches, to retry longest to shortest
-                matches.append((loc2, e))
-
-        if matches:
-            # re-evaluate all matches in descending order of length of match, in case attached actions
-            # might change whether or how much they match of the input.
-            matches.sort(key=itemgetter(0), reverse=True)
-
-            if not doActions:
-                # no further conditions or parse actions to change the selection of
-                # alternative, so the first match will be the best match
-                best_expr = matches[0][1]
-                return best_expr._parse(instring, loc, doActions)
-
-            longest = -1, None
-            for loc1, expr1 in matches:
-                if loc1 <= longest[0]:
-                    # already have a longer match than this one will deliver, we are done
-                    return longest
-
-                try:
-                    loc2, toks = expr1._parse(instring, loc, doActions)
-                except ParseException as err:
-                    err.__traceback__ = None
-                    if err.loc > maxExcLoc:
-                        maxException = err
-                        maxExcLoc = err.loc
-                else:
-                    if loc2 >= loc1:
-                        return loc2, toks
-                    # didn't match as much as before
-                    elif loc2 > longest[0]:
-                        longest = loc2, toks
-
-            if longest != (-1, None):
-                return longest
-
-        if fatals:
-            if len(fatals) > 1:
-                fatals.sort(key=lambda e: -e.loc)
-                if fatals[0].loc == fatals[1].loc:
-                    fatals.sort(key=lambda e: (-e.loc, -len(str(e.parserElement))))
-            max_fatal = fatals[0]
-            raise max_fatal
-
-        if maxException is not None:
-            maxException.msg = self.errmsg
-            raise maxException
-        else:
-            raise ParseException(
-                instring, loc, "no defined alternatives to match", self
-            )
-
-    def __ixor__(self, other):
-        if isinstance(other, str_type):
-            other = self._literalStringClass(other)
-        return self.append(other)  # Or([self, other])
-
-    def _generateDefaultName(self):
-        return "{" + " ^ ".join(str(e) for e in self.exprs) + "}"
-
-    def _setResultsName(self, name, listAllMatches=False):
-        if (
-            __diag__.warn_multiple_tokens_in_named_alternation
-            and Diagnostics.warn_multiple_tokens_in_named_alternation
-            not in self.suppress_warnings_
-        ):
-            if any(
-                isinstance(e, And)
-                and Diagnostics.warn_multiple_tokens_in_named_alternation
-                not in e.suppress_warnings_
-                for e in self.exprs
-            ):
-                warnings.warn(
-                    "{}: setting results name {!r} on {} expression "
-                    "will return a list of all parsed tokens in an And alternative, "
-                    "in prior versions only the first token was returned; enclose "
-                    "contained argument in Group".format(
-                        "warn_multiple_tokens_in_named_alternation",
-                        name,
-                        type(self).__name__,
-                    ),
-                    stacklevel=3,
-                )
-
-        return super()._setResultsName(name, listAllMatches)
-
-
-class MatchFirst(ParseExpression):
-    """Requires that at least one :class:`ParseExpression` is found. If
-    more than one expression matches, the first one listed is the one that will
-    match. May be constructed using the ``'|'`` operator.
-
-    Example::
-
-        # construct MatchFirst using '|' operator
-
-        # watch the order of expressions to match
-        number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
-        print(number.search_string("123 3.1416 789")) #  Fail! -> [['123'], ['3'], ['1416'], ['789']]
-
-        # put more selective expression first
-        number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
-        print(number.search_string("123 3.1416 789")) #  Better -> [['123'], ['3.1416'], ['789']]
-    """
-
-    def __init__(self, exprs: IterableType[ParserElement], savelist: bool = False):
-        super().__init__(exprs, savelist)
-        if self.exprs:
-            self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
-            self.skipWhitespace = all(e.skipWhitespace for e in self.exprs)
-        else:
-            self.mayReturnEmpty = True
-
-    def streamline(self) -> ParserElement:
-        if self.streamlined:
-            return self
-
-        super().streamline()
-        if self.exprs:
-            self.saveAsList = any(e.saveAsList for e in self.exprs)
-            self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
-            self.skipWhitespace = all(
-                e.skipWhitespace and not isinstance(e, White) for e in self.exprs
-            )
-        else:
-            self.saveAsList = False
-            self.mayReturnEmpty = True
-        return self
-
-    def parseImpl(self, instring, loc, doActions=True):
-        maxExcLoc = -1
-        maxException = None
-
-        for e in self.exprs:
-            try:
-                return e._parse(
-                    instring,
-                    loc,
-                    doActions,
-                )
-            except ParseFatalException as pfe:
-                pfe.__traceback__ = None
-                pfe.parserElement = e
-                raise
-            except ParseException as err:
-                if err.loc > maxExcLoc:
-                    maxException = err
-                    maxExcLoc = err.loc
-            except IndexError:
-                if len(instring) > maxExcLoc:
-                    maxException = ParseException(
-                        instring, len(instring), e.errmsg, self
-                    )
-                    maxExcLoc = len(instring)
-
-        if maxException is not None:
-            maxException.msg = self.errmsg
-            raise maxException
-        else:
-            raise ParseException(
-                instring, loc, "no defined alternatives to match", self
-            )
-
-    def __ior__(self, other):
-        if isinstance(other, str_type):
-            other = self._literalStringClass(other)
-        return self.append(other)  # MatchFirst([self, other])
-
-    def _generateDefaultName(self):
-        return "{" + " | ".join(str(e) for e in self.exprs) + "}"
-
-    def _setResultsName(self, name, listAllMatches=False):
-        if (
-            __diag__.warn_multiple_tokens_in_named_alternation
-            and Diagnostics.warn_multiple_tokens_in_named_alternation
-            not in self.suppress_warnings_
-        ):
-            if any(
-                isinstance(e, And)
-                and Diagnostics.warn_multiple_tokens_in_named_alternation
-                not in e.suppress_warnings_
-                for e in self.exprs
-            ):
-                warnings.warn(
-                    "{}: setting results name {!r} on {} expression "
-                    "will return a list of all parsed tokens in an And alternative, "
-                    "in prior versions only the first token was returned; enclose "
-                    "contained argument in Group".format(
-                        "warn_multiple_tokens_in_named_alternation",
-                        name,
-                        type(self).__name__,
-                    ),
-                    stacklevel=3,
-                )
-
-        return super()._setResultsName(name, listAllMatches)
-
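-# --- editor's sketch (not in the original source): the practical difference
-# between Or ("^", longest alternative wins) and MatchFirst ("|", first
-# listed alternative wins), assuming the standalone pyparsing package:
-#
-#   from pyparsing import Word, nums, alphanums
-#   longest = Word(nums) ^ Word(alphanums)
-#   first = Word(nums) | Word(alphanums)
-#   print(longest.parse_string("123abc"))  # -> ['123abc']
-#   print(first.parse_string("123abc"))    # -> ['123']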
-
-class Each(ParseExpression):
-    """Requires all given :class:`ParseExpression` s to be found, but in
-    any order. Expressions may be separated by whitespace.
-
-    May be constructed using the ``'&'`` operator.
-
-    Example::
-
-        color = one_of("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
-        shape_type = one_of("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
-        integer = Word(nums)
-        shape_attr = "shape:" + shape_type("shape")
-        posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
-        color_attr = "color:" + color("color")
-        size_attr = "size:" + integer("size")
-
-        # use Each (using operator '&') to accept attributes in any order
-        # (shape and posn are required, color and size are optional)
-        shape_spec = shape_attr & posn_attr & Opt(color_attr) & Opt(size_attr)
-
-        shape_spec.run_tests('''
-            shape: SQUARE color: BLACK posn: 100, 120
-            shape: CIRCLE size: 50 color: BLUE posn: 50,80
-            color:GREEN size:20 shape:TRIANGLE posn:20,40
-            '''
-            )
-
-    prints::
-
-        shape: SQUARE color: BLACK posn: 100, 120
-        ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
-        - color: BLACK
-        - posn: ['100', ',', '120']
-          - x: 100
-          - y: 120
-        - shape: SQUARE
-
-
-        shape: CIRCLE size: 50 color: BLUE posn: 50,80
-        ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
-        - color: BLUE
-        - posn: ['50', ',', '80']
-          - x: 50
-          - y: 80
-        - shape: CIRCLE
-        - size: 50
-
-
-        color: GREEN size: 20 shape: TRIANGLE posn: 20,40
-        ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
-        - color: GREEN
-        - posn: ['20', ',', '40']
-          - x: 20
-          - y: 40
-        - shape: TRIANGLE
-        - size: 20
-    """
-
-    def __init__(self, exprs: IterableType[ParserElement], savelist: bool = True):
-        super().__init__(exprs, savelist)
-        if self.exprs:
-            self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
-        else:
-            self.mayReturnEmpty = True
-        self.skipWhitespace = True
-        self.initExprGroups = True
-        self.saveAsList = True
-
-    def streamline(self) -> ParserElement:
-        super().streamline()
-        if self.exprs:
-            self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
-        else:
-            self.mayReturnEmpty = True
-        return self
-
-    def parseImpl(self, instring, loc, doActions=True):
-        if self.initExprGroups:
-            self.opt1map = dict(
-                (id(e.expr), e) for e in self.exprs if isinstance(e, Opt)
-            )
-            opt1 = [e.expr for e in self.exprs if isinstance(e, Opt)]
-            opt2 = [
-                e
-                for e in self.exprs
-                if e.mayReturnEmpty and not isinstance(e, (Opt, Regex, ZeroOrMore))
-            ]
-            self.optionals = opt1 + opt2
-            self.multioptionals = [
-                e.expr.set_results_name(e.resultsName, list_all_matches=True)
-                for e in self.exprs
-                if isinstance(e, _MultipleMatch)
-            ]
-            self.multirequired = [
-                e.expr.set_results_name(e.resultsName, list_all_matches=True)
-                for e in self.exprs
-                if isinstance(e, OneOrMore)
-            ]
-            self.required = [
-                e for e in self.exprs if not isinstance(e, (Opt, ZeroOrMore, OneOrMore))
-            ]
-            self.required += self.multirequired
-            self.initExprGroups = False
-
-        tmpLoc = loc
-        tmpReqd = self.required[:]
-        tmpOpt = self.optionals[:]
-        multis = self.multioptionals[:]
-        matchOrder = []
-
-        keepMatching = True
-        failed = []
-        fatals = []
-        while keepMatching:
-            tmpExprs = tmpReqd + tmpOpt + multis
-            failed.clear()
-            fatals.clear()
-            for e in tmpExprs:
-                try:
-                    tmpLoc = e.try_parse(instring, tmpLoc, raise_fatal=True)
-                except ParseFatalException as pfe:
-                    pfe.__traceback__ = None
-                    pfe.parserElement = e
-                    fatals.append(pfe)
-                    failed.append(e)
-                except ParseException:
-                    failed.append(e)
-                else:
-                    matchOrder.append(self.opt1map.get(id(e), e))
-                    if e in tmpReqd:
-                        tmpReqd.remove(e)
-                    elif e in tmpOpt:
-                        tmpOpt.remove(e)
-            if len(failed) == len(tmpExprs):
-                keepMatching = False
-
-        # look for any ParseFatalExceptions
-        if fatals:
-            if len(fatals) > 1:
-                fatals.sort(key=lambda e: -e.loc)
-                if fatals[0].loc == fatals[1].loc:
-                    fatals.sort(key=lambda e: (-e.loc, -len(str(e.parserElement))))
-            max_fatal = fatals[0]
-            raise max_fatal
-
-        if tmpReqd:
-            missing = ", ".join([str(e) for e in tmpReqd])
-            raise ParseException(
-                instring,
-                loc,
-                "Missing one or more required elements ({})".format(missing),
-            )
-
-        # add any unmatched Opts, in case they have default values defined
-        matchOrder += [e for e in self.exprs if isinstance(e, Opt) and e.expr in tmpOpt]
-
-        total_results = ParseResults([])
-        for e in matchOrder:
-            loc, results = e._parse(instring, loc, doActions)
-            total_results += results
-
-        return loc, total_results
-
-    def _generateDefaultName(self):
-        return "{" + " & ".join(str(e) for e in self.exprs) + "}"
-
-
-class ParseElementEnhance(ParserElement):
-    """Abstract subclass of :class:`ParserElement`, for combining and
-    post-processing parsed tokens.
-    """
-
-    def __init__(self, expr: Union[ParserElement, str], savelist: bool = False):
-        super().__init__(savelist)
-        if isinstance(expr, str_type):
-            if issubclass(self._literalStringClass, Token):
-                expr = self._literalStringClass(expr)
-            elif issubclass(type(self), self._literalStringClass):
-                expr = Literal(expr)
-            else:
-                expr = self._literalStringClass(Literal(expr))
-        self.expr = expr
-        if expr is not None:
-            self.mayIndexError = expr.mayIndexError
-            self.mayReturnEmpty = expr.mayReturnEmpty
-            self.set_whitespace_chars(
-                expr.whiteChars, copy_defaults=expr.copyDefaultWhiteChars
-            )
-            self.skipWhitespace = expr.skipWhitespace
-            self.saveAsList = expr.saveAsList
-            self.callPreparse = expr.callPreparse
-            self.ignoreExprs.extend(expr.ignoreExprs)
-
-    def recurse(self) -> Sequence[ParserElement]:
-        return [self.expr] if self.expr is not None else []
-
-    def parseImpl(self, instring, loc, doActions=True):
-        if self.expr is not None:
-            return self.expr._parse(instring, loc, doActions, callPreParse=False)
-        else:
-            raise ParseException(instring, loc, "No expression defined", self)
-
-    def leave_whitespace(self, recursive: bool = True) -> ParserElement:
-        super().leave_whitespace(recursive)
-
-        if recursive:
-            if self.expr is not None:
-                self.expr = self.expr.copy()
-                self.expr.leave_whitespace(recursive)
-        return self
-
-    def ignore_whitespace(self, recursive: bool = True) -> ParserElement:
-        super().ignore_whitespace(recursive)
-
-        if recursive:
-            if self.expr is not None:
-                self.expr = self.expr.copy()
-                self.expr.ignore_whitespace(recursive)
-        return self
-
-    def ignore(self, other) -> ParserElement:
-        if isinstance(other, Suppress):
-            if other not in self.ignoreExprs:
-                super().ignore(other)
-                if self.expr is not None:
-                    self.expr.ignore(self.ignoreExprs[-1])
-        else:
-            super().ignore(other)
-            if self.expr is not None:
-                self.expr.ignore(self.ignoreExprs[-1])
-        return self
-
-    def streamline(self) -> ParserElement:
-        super().streamline()
-        if self.expr is not None:
-            self.expr.streamline()
-        return self
-
-    def _checkRecursion(self, parseElementList):
-        if self in parseElementList:
-            raise RecursiveGrammarException(parseElementList + [self])
-        subRecCheckList = parseElementList[:] + [self]
-        if self.expr is not None:
-            self.expr._checkRecursion(subRecCheckList)
-
-    def validate(self, validateTrace=None) -> None:
-        if validateTrace is None:
-            validateTrace = []
-        tmp = validateTrace[:] + [self]
-        if self.expr is not None:
-            self.expr.validate(tmp)
-        self._checkRecursion([])
-
-    def _generateDefaultName(self):
-        return "{}:({})".format(self.__class__.__name__, str(self.expr))
-
-    ignoreWhitespace = ignore_whitespace
-    leaveWhitespace = leave_whitespace
-
-
-class IndentedBlock(ParseElementEnhance):
-    """
-    Expression to match one or more expressions at a given indentation level.
-    Useful for parsing text where structure is implied by indentation (like Python source code).
-    """
-
-    class _Indent(Empty):
-        def __init__(self, ref_col: int):
-            super().__init__()
-            self.errmsg = "expected indent at column {}".format(ref_col)
-            self.add_condition(lambda s, l, t: col(l, s) == ref_col)
-
-    class _IndentGreater(Empty):
-        def __init__(self, ref_col: int):
-            super().__init__()
-            self.errmsg = "expected indent at column greater than {}".format(ref_col)
-            self.add_condition(lambda s, l, t: col(l, s) > ref_col)
-
-    def __init__(
-        self, expr: ParserElement, *, recursive: bool = False, grouped: bool = True
-    ):
-        super().__init__(expr, savelist=True)
-        # if recursive:
-        #     raise NotImplementedError("IndentedBlock with recursive is not implemented")
-        self._recursive = recursive
-        self._grouped = grouped
-        self.parent_anchor = 1
-
-    def parseImpl(self, instring, loc, doActions=True):
-        # advance parse position to non-whitespace by using an Empty()
-        # this should be the column to be used for all subsequent indented lines
-        anchor_loc = Empty().preParse(instring, loc)
-
-        # see if self.expr matches at the current location - if not it will raise an exception
-        # and no further work is necessary
-        self.expr.try_parse(instring, anchor_loc, doActions)
-
-        indent_col = col(anchor_loc, instring)
-        peer_detect_expr = self._Indent(indent_col)
-
-        inner_expr = Empty() + peer_detect_expr + self.expr
-        if self._recursive:
-            sub_indent = self._IndentGreater(indent_col)
-            nested_block = IndentedBlock(
-                self.expr, recursive=self._recursive, grouped=self._grouped
-            )
-            nested_block.set_debug(self.debug)
-            nested_block.parent_anchor = indent_col
-            inner_expr += Opt(sub_indent + nested_block)
-
-        inner_expr.set_name(f"inner {hex(id(inner_expr))[-4:].upper()}@{indent_col}")
-        block = OneOrMore(inner_expr)
-
-        trailing_undent = self._Indent(self.parent_anchor) | StringEnd()
-
-        if self._grouped:
-            wrapper = Group
-        else:
-            wrapper = lambda expr: expr
-        return (wrapper(block) + Optional(trailing_undent)).parseImpl(
-            instring, anchor_loc, doActions
-        )
-
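-# --- editor's sketch (not in the original source): minimal IndentedBlock
-# usage; grouped=True (the default) wraps each indented block in its own
-# sub-list:
-#
-#   from pyparsing import Word, alphas, nums, Group, OneOrMore, IndentedBlock
-#   heading = Word(alphas)
-#   block = IndentedBlock(Word(nums))
-#   outline = OneOrMore(Group(heading + ":" + block))
-#   print(outline.parse_string("colors:\n  1\n  2\n"))
-#   # -> [['colors', ':', ['1', '2']]]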
-
-class AtStringStart(ParseElementEnhance):
-    """Matches if expression matches at the beginning of the parse
-    string::
-
-        AtStringStart(Word(nums)).parse_string("123")
-        # prints ["123"]
-
-        AtStringStart(Word(nums)).parse_string("    123")
-        # raises ParseException
-    """
-
-    def __init__(self, expr: Union[ParserElement, str]):
-        super().__init__(expr)
-        self.callPreparse = False
-
-    def parseImpl(self, instring, loc, doActions=True):
-        if loc != 0:
-            raise ParseException(instring, loc, "not found at string start")
-        return super().parseImpl(instring, loc, doActions)
-
-
-class AtLineStart(ParseElementEnhance):
-    r"""Matches if an expression matches at the beginning of a line within
-    the parse string
-
-    Example::
-
-        test = '''\
-        AAA this line
-        AAA and this line
-          AAA but not this one
-        B AAA and definitely not this one
-        '''
-
-        for t in (AtLineStart('AAA') + restOfLine).search_string(test):
-            print(t)
-
-    prints::
-
-        ['AAA', ' this line']
-        ['AAA', ' and this line']
-
-    """
-
-    def __init__(self, expr: Union[ParserElement, str]):
-        super().__init__(expr)
-        self.callPreparse = False
-
-    def parseImpl(self, instring, loc, doActions=True):
-        if col(loc, instring) != 1:
-            raise ParseException(instring, loc, "not found at line start")
-        return super().parseImpl(instring, loc, doActions)
-
-
-class FollowedBy(ParseElementEnhance):
-    """Lookahead matching of the given parse expression.
-    ``FollowedBy`` does *not* advance the parsing position within
-    the input string, it only verifies that the specified parse
-    expression matches at the current position.  ``FollowedBy``
-    always returns a null token list. If any results names are defined
-    in the lookahead expression, those *will* be returned for access by
-    name.
-
-    Example::
-
-        # use FollowedBy to match a label only if it is followed by a ':'
-        data_word = Word(alphas)
-        label = data_word + FollowedBy(':')
-        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
-
-        OneOrMore(attr_expr).parse_string("shape: SQUARE color: BLACK posn: upper left").pprint()
-
-    prints::
-
-        [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
-    """
-
-    def __init__(self, expr: Union[ParserElement, str]):
-        super().__init__(expr)
-        self.mayReturnEmpty = True
-
-    def parseImpl(self, instring, loc, doActions=True):
-        # by using self.expr._parse and deleting the contents of the returned ParseResults list,
-        # we keep any named results that were defined in the FollowedBy expression
-        _, ret = self.expr._parse(instring, loc, doActions=doActions)
-        del ret[:]
-
-        return loc, ret
-
-
-class PrecededBy(ParseElementEnhance):
-    """Lookbehind matching of the given parse expression.
-    ``PrecededBy`` does not advance the parsing position within the
-    input string, it only verifies that the specified parse expression
-    matches prior to the current position.  ``PrecededBy`` always
-    returns a null token list, but if a results name is defined on the
-    given expression, it is returned.
-
-    Parameters:
-
-    - expr - expression that must match prior to the current parse
-      location
-    - retreat - (default= ``None``) - (int) maximum number of characters
-      to look back prior to the current parse location
-
-    If the lookbehind expression is a string, :class:`Literal`,
-    :class:`Keyword`, or a :class:`Word` or :class:`CharsNotIn`
-    with a specified exact or maximum length, then the retreat
-    parameter is not required. Otherwise, retreat must be specified to
-    give a maximum number of characters to look back from
-    the current parse position for a lookbehind match.
-
-    Example::
-
-        # VB-style variable names with type prefixes
-        int_var = PrecededBy("#") + pyparsing_common.identifier
-        str_var = PrecededBy("$") + pyparsing_common.identifier
-
-    """
-
-    def __init__(
-        self, expr: Union[ParserElement, str], retreat: OptionalType[int] = None
-    ):
-        super().__init__(expr)
-        self.expr = self.expr().leave_whitespace()
-        self.mayReturnEmpty = True
-        self.mayIndexError = False
-        self.exact = False
-        if isinstance(expr, str_type):
-            retreat = len(expr)
-            self.exact = True
-        elif isinstance(expr, (Literal, Keyword)):
-            retreat = expr.matchLen
-            self.exact = True
-        elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT:
-            retreat = expr.maxLen
-            self.exact = True
-        elif isinstance(expr, PositionToken):
-            retreat = 0
-            self.exact = True
-        self.retreat = retreat
-        self.errmsg = "not preceded by " + str(expr)
-        self.skipWhitespace = False
-        self.parseAction.append(lambda s, l, t: t.__delitem__(slice(None, None)))
-
-    def parseImpl(self, instring, loc=0, doActions=True):
-        if self.exact:
-            if loc < self.retreat:
-                raise ParseException(instring, loc, self.errmsg)
-            start = loc - self.retreat
-            _, ret = self.expr._parse(instring, start)
-        else:
-            # retreat specified a maximum lookbehind window, iterate
-            test_expr = self.expr + StringEnd()
-            instring_slice = instring[max(0, loc - self.retreat) : loc]
-            last_expr = ParseException(instring, loc, self.errmsg)
-            for offset in range(1, min(loc, self.retreat + 1) + 1):
-                try:
-                    # print('trying', offset, instring_slice, repr(instring_slice[loc - offset:]))
-                    _, ret = test_expr._parse(
-                        instring_slice, len(instring_slice) - offset
-                    )
-                except ParseBaseException as pbe:
-                    last_expr = pbe
-                else:
-                    break
-            else:
-                raise last_expr
-        return loc, ret
-
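-# --- editor's sketch (not in the original source): PrecededBy checks the
-# lookbehind without consuming it, so only identifiers directly after a
-# '$' sigil are matched here:
-#
-#   from pyparsing import PrecededBy, Word, alphas
-#   print((PrecededBy("$") + Word(alphas)).search_string("$foo bar"))
-#   # -> [['foo']]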
-
-class Located(ParseElementEnhance):
-    """
-    Decorates a returned token with its starting and ending
-    locations in the input string.
-
-    This helper adds the following results names:
-
-    - ``locn_start`` - location where matched expression begins
-    - ``locn_end`` - location where matched expression ends
-    - ``value`` - the actual parsed results
-
-    Be careful if the input text contains ``<TAB>`` characters, you
-    may want to call :class:`ParserElement.parse_with_tabs`
-
-    Example::
-
-        wd = Word(alphas)
-        for match in Located(wd).search_string("ljsdf123lksdjjf123lkkjj1222"):
-            print(match)
-
-    prints::
-
-        [0, ['ljsdf'], 5]
-        [8, ['lksdjjf'], 15]
-        [18, ['lkkjj'], 23]
-
-    """
-
-    def parseImpl(self, instring, loc, doActions=True):
-        start = loc
-        loc, tokens = self.expr._parse(instring, start, doActions, callPreParse=False)
-        ret_tokens = ParseResults([start, tokens, loc])
-        ret_tokens["locn_start"] = start
-        ret_tokens["value"] = tokens
-        ret_tokens["locn_end"] = loc
-        if self.resultsName:
-            # must return as a list, so that the name will be attached to the complete group
-            return loc, [ret_tokens]
-        else:
-            return loc, ret_tokens
-
-
-class NotAny(ParseElementEnhance):
-    """
-    Lookahead to disallow matching with the given parse expression.
-    ``NotAny`` does *not* advance the parsing position within the
-    input string, it only verifies that the specified parse expression
-    does *not* match at the current position.  Also, ``NotAny`` does
-    *not* skip over leading whitespace. ``NotAny`` always returns
-    a null token list.  May be constructed using the ``'~'`` operator.
-
-    Example::
-
-        AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split())
-
-        # take care not to mistake keywords for identifiers
-        ident = ~(AND | OR | NOT) + Word(alphas)
-        boolean_term = Opt(NOT) + ident
-
-        # very crude boolean expression - to support parenthesis groups and
-        # operation hierarchy, use infix_notation
-        boolean_expr = boolean_term + ZeroOrMore((AND | OR) + boolean_term)
-
-        # integers that are followed by "." are actually floats
-        integer = Word(nums) + ~Char(".")
-    """
-
-    def __init__(self, expr: Union[ParserElement, str]):
-        super().__init__(expr)
-        # do NOT use self.leave_whitespace(), don't want to propagate to exprs
-        # self.leave_whitespace()
-        self.skipWhitespace = False
-
-        self.mayReturnEmpty = True
-        self.errmsg = "Found unwanted token, " + str(self.expr)
-
-    def parseImpl(self, instring, loc, doActions=True):
-        if self.expr.can_parse_next(instring, loc):
-            raise ParseException(instring, loc, self.errmsg, self)
-        return loc, []
-
-    def _generateDefaultName(self):
-        return "~{" + str(self.expr) + "}"
-
-
-class _MultipleMatch(ParseElementEnhance):
-    def __init__(
-        self,
-        expr: ParserElement,
-        stop_on: OptionalType[Union[ParserElement, str]] = None,
-        *,
-        stopOn: OptionalType[Union[ParserElement, str]] = None,
-    ):
-        super().__init__(expr)
-        stopOn = stopOn or stop_on
-        self.saveAsList = True
-        ender = stopOn
-        if isinstance(ender, str_type):
-            ender = self._literalStringClass(ender)
-        self.stopOn(ender)
-
-    def stopOn(self, ender) -> ParserElement:
-        if isinstance(ender, str_type):
-            ender = self._literalStringClass(ender)
-        self.not_ender = ~ender if ender is not None else None
-        return self
-
-    def parseImpl(self, instring, loc, doActions=True):
-        self_expr_parse = self.expr._parse
-        self_skip_ignorables = self._skipIgnorables
-        check_ender = self.not_ender is not None
-        if check_ender:
-            try_not_ender = self.not_ender.tryParse
-
-        # must be at least one (but first see if we are the stopOn sentinel;
-        # if so, fail)
-        if check_ender:
-            try_not_ender(instring, loc)
-        loc, tokens = self_expr_parse(instring, loc, doActions)
-        try:
-            hasIgnoreExprs = bool(self.ignoreExprs)
-            while 1:
-                if check_ender:
-                    try_not_ender(instring, loc)
-                if hasIgnoreExprs:
-                    preloc = self_skip_ignorables(instring, loc)
-                else:
-                    preloc = loc
-                loc, tmptokens = self_expr_parse(instring, preloc, doActions)
-                if tmptokens or tmptokens.haskeys():
-                    tokens += tmptokens
-        except (ParseException, IndexError):
-            pass
-
-        return loc, tokens
-
-    def _setResultsName(self, name, listAllMatches=False):
-        if (
-            __diag__.warn_ungrouped_named_tokens_in_collection
-            and Diagnostics.warn_ungrouped_named_tokens_in_collection
-            not in self.suppress_warnings_
-        ):
-            for e in [self.expr] + self.expr.recurse():
-                if (
-                    isinstance(e, ParserElement)
-                    and e.resultsName
-                    and Diagnostics.warn_ungrouped_named_tokens_in_collection
-                    not in e.suppress_warnings_
-                ):
-                    warnings.warn(
-                        "{}: setting results name {!r} on {} expression "
-                        "collides with {!r} on contained expression".format(
-                            "warn_ungrouped_named_tokens_in_collection",
-                            name,
-                            type(self).__name__,
-                            e.resultsName,
-                        ),
-                        stacklevel=3,
-                    )
-
-        return super()._setResultsName(name, listAllMatches)
-
-
-class OneOrMore(_MultipleMatch):
-    """
-    Repetition of one or more of the given expression.
-
-    Parameters:
-    - expr - expression that must match one or more times
-    - stop_on - (default= ``None``) - expression for a terminating sentinel
-         (only required if the sentinel would ordinarily match the repetition
-         expression)
-
-    Example::
-
-        data_word = Word(alphas)
-        label = data_word + FollowedBy(':')
-        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).set_parse_action(' '.join))
-
-        text = "shape: SQUARE posn: upper left color: BLACK"
-        OneOrMore(attr_expr).parse_string(text).pprint()  # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]
-
-        # use stop_on attribute for OneOrMore to avoid reading label string as part of the data
-        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
-        OneOrMore(attr_expr).parse_string(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
-
-        # could also be written as
-        (attr_expr * (1,)).parse_string(text).pprint()
-    """
-
-    def _generateDefaultName(self):
-        return "{" + str(self.expr) + "}..."
-
-
-class ZeroOrMore(_MultipleMatch):
-    """
-    Optional repetition of zero or more of the given expression.
-
-    Parameters:
-    - ``expr`` - expression that must match zero or more times
-    - ``stop_on`` - expression for a terminating sentinel
-      (only required if the sentinel would ordinarily match the repetition
-      expression) - (default= ``None``)
-
-    Example: similar to :class:`OneOrMore`
-    """
-
-    def __init__(
-        self,
-        expr: ParserElement,
-        stop_on: OptionalType[Union[ParserElement, str]] = None,
-        *,
-        stopOn: OptionalType[Union[ParserElement, str]] = None,
-    ):
-        super().__init__(expr, stopOn=stopOn or stop_on)
-        self.mayReturnEmpty = True
-
-    def parseImpl(self, instring, loc, doActions=True):
-        try:
-            return super().parseImpl(instring, loc, doActions)
-        except (ParseException, IndexError):
-            return loc, ParseResults([], name=self.resultsName)
-
-    def _generateDefaultName(self):
-        return "[" + str(self.expr) + "]..."
-
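-# --- editor's sketch (not in the original source): ZeroOrMore succeeds on
-# zero matches, where OneOrMore would raise a ParseException:
-#
-#   from pyparsing import ZeroOrMore, Word, nums
-#   print(ZeroOrMore(Word(nums)).parse_string("1 2 3"))  # -> ['1', '2', '3']
-#   print(ZeroOrMore(Word(nums)).parse_string(""))       # -> []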
-
-class _NullToken:
-    def __bool__(self):
-        return False
-
-    def __str__(self):
-        return ""
-
-
-class Opt(ParseElementEnhance):
-    """
-    Optional matching of the given expression.
-
-    Parameters:
-    - ``expr`` - expression that may match zero or one time
-    - ``default`` (optional) - value to be returned if the optional expression is not found.
-
-    Example::
-
-        # US postal code can be a 5-digit zip, plus optional 4-digit qualifier
-        zip = Combine(Word(nums, exact=5) + Opt('-' + Word(nums, exact=4)))
-        zip.run_tests('''
-            # traditional ZIP code
-            12345
-
-            # ZIP+4 form
-            12101-0001
-
-            # invalid ZIP
-            98765-
-            ''')
-
-    prints::
-
-        # traditional ZIP code
-        12345
-        ['12345']
-
-        # ZIP+4 form
-        12101-0001
-        ['12101-0001']
-
-        # invalid ZIP
-        98765-
-             ^
-        FAIL: Expected end of text (at char 5), (line:1, col:6)
-    """
-
-    __optionalNotMatched = _NullToken()
-
-    def __init__(
-        self, expr: Union[ParserElement, str], default: Any = __optionalNotMatched
-    ):
-        super().__init__(expr, savelist=False)
-        self.saveAsList = self.expr.saveAsList
-        self.defaultValue = default
-        self.mayReturnEmpty = True
-
-    def parseImpl(self, instring, loc, doActions=True):
-        self_expr = self.expr
-        try:
-            loc, tokens = self_expr._parse(instring, loc, doActions, callPreParse=False)
-        except (ParseException, IndexError):
-            default_value = self.defaultValue
-            if default_value is not self.__optionalNotMatched:
-                if self_expr.resultsName:
-                    tokens = ParseResults([default_value])
-                    tokens[self_expr.resultsName] = default_value
-                else:
-                    tokens = [default_value]
-            else:
-                tokens = []
-        return loc, tokens
-
-    def _generateDefaultName(self):
-        inner = str(self.expr)
-        # strip off redundant inner {}'s
-        while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}":
-            inner = inner[1:-1]
-        return "[" + inner + "]"
-
-
-Optional = Opt
-
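-# --- editor's sketch (not in the original source): Opt's ``default``
-# argument supplies a stand-in token when the optional expression is absent:
-#
-#   from pyparsing import Word, alphas, nums, Opt
-#   expr = Word(alphas) + Opt(Word(nums), default="0")
-#   print(expr.parse_string("abc 123"))  # -> ['abc', '123']
-#   print(expr.parse_string("abc"))      # -> ['abc', '0']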
-
-class SkipTo(ParseElementEnhance):
-    """
-    Token for skipping over all undefined text until the matched
-    expression is found.
-
-    Parameters:
-    - ``expr`` - target expression marking the end of the data to be skipped
-    - ``include`` - if ``True``, the target expression is also parsed
-      (the skipped text and target expression are returned as a 2-element
-      list) (default= ``False``).
-    - ``ignore`` - (default= ``None``) used to define grammars (typically quoted strings and
-      comments) that might contain false matches to the target expression
-    - ``fail_on`` - (default= ``None``) define expressions that are not allowed to be
-      included in the skipped text; if found before the target expression is found,
-      the :class:`SkipTo` is not a match
-
-    Example::
-
-        report = '''
-            Outstanding Issues Report - 1 Jan 2000
-
-               # | Severity | Description                               |  Days Open
-            -----+----------+-------------------------------------------+-----------
-             101 | Critical | Intermittent system crash                 |          6
-              94 | Cosmetic | Spelling error on Login ('log|n')         |         14
-              79 | Minor    | System slow when running too many reports |         47
-            '''
-        integer = Word(nums)
-        SEP = Suppress('|')
-        # use SkipTo to simply match everything up until the next SEP
-        # - ignore quoted strings, so that a '|' character inside a quoted string does not match
-        # - parse action will call token.strip() for each matched token, i.e., the description body
-        string_data = SkipTo(SEP, ignore=quoted_string)
-        string_data.set_parse_action(token_map(str.strip))
-        ticket_expr = (integer("issue_num") + SEP
-                      + string_data("sev") + SEP
-                      + string_data("desc") + SEP
-                      + integer("days_open"))
-
-        for tkt in ticket_expr.search_string(report):
-            print(tkt.dump())
-
-    prints::
-
-        ['101', 'Critical', 'Intermittent system crash', '6']
-        - days_open: '6'
-        - desc: 'Intermittent system crash'
-        - issue_num: '101'
-        - sev: 'Critical'
-        ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
-        - days_open: '14'
-        - desc: "Spelling error on Login ('log|n')"
-        - issue_num: '94'
-        - sev: 'Cosmetic'
-        ['79', 'Minor', 'System slow when running too many reports', '47']
-        - days_open: '47'
-        - desc: 'System slow when running too many reports'
-        - issue_num: '79'
-        - sev: 'Minor'
-    """
-
-    def __init__(
-        self,
-        other: Union[ParserElement, str],
-        include: bool = False,
-        ignore: OptionalType[ParserElement] = None,
-        fail_on: OptionalType[Union[ParserElement, str]] = None,
-        *,
-        failOn: OptionalType[Union[ParserElement, str]] = None,
-    ):
-        super().__init__(other)
-        failOn = failOn or fail_on
-        self.ignoreExpr = ignore
-        self.mayReturnEmpty = True
-        self.mayIndexError = False
-        self.includeMatch = include
-        self.saveAsList = False
-        if isinstance(failOn, str_type):
-            self.failOn = self._literalStringClass(failOn)
-        else:
-            self.failOn = failOn
-        self.errmsg = "No match found for " + str(self.expr)
-
-    def parseImpl(self, instring, loc, doActions=True):
-        startloc = loc
-        instrlen = len(instring)
-        self_expr_parse = self.expr._parse
-        self_failOn_canParseNext = (
-            self.failOn.canParseNext if self.failOn is not None else None
-        )
-        self_ignoreExpr_tryParse = (
-            self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
-        )
-
-        tmploc = loc
-        while tmploc <= instrlen:
-            if self_failOn_canParseNext is not None:
-                # break if failOn expression matches
-                if self_failOn_canParseNext(instring, tmploc):
-                    break
-
-            if self_ignoreExpr_tryParse is not None:
-                # advance past ignore expressions
-                while 1:
-                    try:
-                        tmploc = self_ignoreExpr_tryParse(instring, tmploc)
-                    except ParseBaseException:
-                        break
-
-            try:
-                self_expr_parse(instring, tmploc, doActions=False, callPreParse=False)
-            except (ParseException, IndexError):
-                # no match, advance loc in string
-                tmploc += 1
-            else:
-                # matched skipto expr, done
-                break
-
-        else:
-            # ran off the end of the input string without matching skipto expr, fail
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        # build up return values
-        loc = tmploc
-        skiptext = instring[startloc:loc]
-        skipresult = ParseResults(skiptext)
-
-        if self.includeMatch:
-            loc, mat = self_expr_parse(instring, loc, doActions, callPreParse=False)
-            skipresult += mat
-
-        return loc, skipresult
-
-
-class Forward(ParseElementEnhance):
-    """
-    Forward declaration of an expression to be defined later -
-    used for recursive grammars, such as algebraic infix notation.
-    When the expression is known, it is assigned to the ``Forward``
-    variable using the ``'<<'`` operator.
-
-    Note: take care when assigning to ``Forward`` not to overlook
-    precedence of operators.
-
-    Specifically, ``'|'`` has a lower precedence than ``'<<'``, so that::
-
-        fwd_expr << a | b | c
-
-    will actually be evaluated as::
-
-        (fwd_expr << a) | b | c
-
-    thereby leaving b and c out as parseable alternatives.  It is recommended that you
-    explicitly group the values inserted into the ``Forward``::
-
-        fwd_expr << (a | b | c)
-
-    Converting to use the ``'<<='`` operator instead will avoid this problem.
-
-    See :class:`ParseResults.pprint` for an example of a recursive
-    parser created using ``Forward``.
-    """
-
-    def __init__(self, other: OptionalType[Union[ParserElement, str]] = None):
-        self.caller_frame = traceback.extract_stack(limit=2)[0]
-        super().__init__(other, savelist=False)
-        self.lshift_line = None
-
-    def __lshift__(self, other):
-        if hasattr(self, "caller_frame"):
-            del self.caller_frame
-        if isinstance(other, str_type):
-            other = self._literalStringClass(other)
-        self.expr = other
-        self.mayIndexError = self.expr.mayIndexError
-        self.mayReturnEmpty = self.expr.mayReturnEmpty
-        self.set_whitespace_chars(
-            self.expr.whiteChars, copy_defaults=self.expr.copyDefaultWhiteChars
-        )
-        self.skipWhitespace = self.expr.skipWhitespace
-        self.saveAsList = self.expr.saveAsList
-        self.ignoreExprs.extend(self.expr.ignoreExprs)
-        self.lshift_line = traceback.extract_stack(limit=2)[-2]
-        return self
-
-    def __ilshift__(self, other):
-        return self << other
-
-    def __or__(self, other):
-        caller_line = traceback.extract_stack(limit=2)[-2]
-        if (
-            __diag__.warn_on_match_first_with_lshift_operator
-            and caller_line == self.lshift_line
-            and Diagnostics.warn_on_match_first_with_lshift_operator
-            not in self.suppress_warnings_
-        ):
-            warnings.warn(
-                "using '<<' operator with '|' is probably an error, use '<<='",
-                stacklevel=2,
-            )
-        ret = super().__or__(other)
-        return ret
-
-    def __del__(self):
-        # see if we are getting dropped because of '=' reassignment of var instead of '<<=' or '<<'
-        if (
-            self.expr is None
-            and __diag__.warn_on_assignment_to_Forward
-            and Diagnostics.warn_on_assignment_to_Forward not in self.suppress_warnings_
-        ):
-            warnings.warn_explicit(
-                "Forward defined here but no expression attached later using '<<=' or '<<'",
-                UserWarning,
-                filename=self.caller_frame.filename,
-                lineno=self.caller_frame.lineno,
-            )
-
-    def parseImpl(self, instring, loc, doActions=True):
-        if (
-            self.expr is None
-            and __diag__.warn_on_parse_using_empty_Forward
-            and Diagnostics.warn_on_parse_using_empty_Forward
-            not in self.suppress_warnings_
-        ):
-            # walk stack until parse_string, scan_string, search_string, or transform_string is found
-            parse_fns = [
-                "parse_string",
-                "scan_string",
-                "search_string",
-                "transform_string",
-            ]
-            tb = traceback.extract_stack(limit=200)
-            for i, frm in enumerate(reversed(tb), start=1):
-                if frm.name in parse_fns:
-                    stacklevel = i + 1
-                    break
-            else:
-                stacklevel = 2
-            warnings.warn(
-                "Forward expression was never assigned a value, will not parse any input",
-                stacklevel=stacklevel,
-            )
-        if not ParserElement._left_recursion_enabled:
-            return super().parseImpl(instring, loc, doActions)
-        # ## Bounded Recursion algorithm ##
-        # Recursion only needs to be processed at ``Forward`` elements, since they are
-        # the only ones that can actually refer to themselves. The general idea is
-        # to handle recursion stepwise: We start at no recursion, then recurse once,
-        # recurse twice, ..., until more recursion offers no benefit (we hit the bound).
-        #
-        # The "trick" here is that each ``Forward`` gets evaluated in two contexts
-        # - to *match* a specific recursion level, and
-        # - to *search* the bounded recursion level
-        # and the two run concurrently. The *search* must *match* each recursion level
-        # to find the best possible match. This is handled by a memo table, which
-        # provides the previous match to the next level match attempt.
-        #
-        # See also "Left Recursion in Parsing Expression Grammars", Medeiros et al.
-        #
-        # There is a complication since we not only *parse* but also *transform* via
-        # actions: We do not want to run the actions too often while expanding. Thus,
-        # we expand using `doActions=False` and only run `doActions=True` if the next
-        # recursion level is acceptable.
-        with ParserElement.recursion_lock:
-            memo = ParserElement.recursion_memos
-            try:
-                # we are parsing at a specific recursion expansion - use it as-is
-                prev_loc, prev_result = memo[loc, self, doActions]
-                if isinstance(prev_result, Exception):
-                    raise prev_result
-                return prev_loc, prev_result.copy()
-            except KeyError:
-                act_key = (loc, self, True)
-                peek_key = (loc, self, False)
-                # we are searching for the best recursion expansion - keep on improving
-                # both `doActions` cases must be tracked separately here!
-                prev_loc, prev_peek = memo[peek_key] = (
-                    loc - 1,
-                    ParseException(
-                        instring, loc, "Forward recursion without base case", self
-                    ),
-                )
-                if doActions:
-                    memo[act_key] = memo[peek_key]
-                while True:
-                    try:
-                        new_loc, new_peek = super().parseImpl(instring, loc, False)
-                    except ParseException:
-                        # we failed before getting any match – do not hide the error
-                        if isinstance(prev_peek, Exception):
-                            raise
-                        new_loc, new_peek = prev_loc, prev_peek
-                    # the match did not get better: we are done
-                    if new_loc <= prev_loc:
-                        if doActions:
-                            # replace the match for doActions=False as well,
-                            # in case the action did backtrack
-                            prev_loc, prev_result = memo[peek_key] = memo[act_key]
-                            del memo[peek_key], memo[act_key]
-                            return prev_loc, prev_result.copy()
-                        del memo[peek_key]
-                        return prev_loc, prev_peek.copy()
-                    # the match did get better: see if we can improve further
-                    else:
-                        if doActions:
-                            try:
-                                memo[act_key] = super().parseImpl(instring, loc, True)
-                            except ParseException as e:
-                                memo[peek_key] = memo[act_key] = (new_loc, e)
-                                raise
-                        prev_loc, prev_peek = memo[peek_key] = new_loc, new_peek
-
-    def leave_whitespace(self, recursive: bool = True) -> ParserElement:
-        self.skipWhitespace = False
-        return self
-
-    def ignore_whitespace(self, recursive: bool = True) -> ParserElement:
-        self.skipWhitespace = True
-        return self
-
-    def streamline(self) -> ParserElement:
-        if not self.streamlined:
-            self.streamlined = True
-            if self.expr is not None:
-                self.expr.streamline()
-        return self
-
-    def validate(self, validateTrace=None) -> None:
-        if validateTrace is None:
-            validateTrace = []
-
-        if self not in validateTrace:
-            tmp = validateTrace[:] + [self]
-            if self.expr is not None:
-                self.expr.validate(tmp)
-        self._checkRecursion([])
-
-    def _generateDefaultName(self):
-        # Avoid infinite recursion by setting a temporary _defaultName
-        self._defaultName = ": ..."
-
-        # Use the string representation of the main expression.
-        retString = "..."
-        try:
-            if self.expr is not None:
-                retString = str(self.expr)[:1000]
-            else:
-                retString = "None"
-        finally:
-            return self.__class__.__name__ + ": " + retString
-
-    def copy(self) -> ParserElement:
-        if self.expr is not None:
-            return super().copy()
-        else:
-            ret = Forward()
-            ret <<= self
-            return ret
-
-    def _setResultsName(self, name, list_all_matches=False):
-        if (
-            __diag__.warn_name_set_on_empty_Forward
-            and Diagnostics.warn_name_set_on_empty_Forward
-            not in self.suppress_warnings_
-        ):
-            if self.expr is None:
-                warnings.warn(
-                    "{}: setting results name {!r} on {} expression "
-                    "that has no contained expression".format(
-                        "warn_name_set_on_empty_Forward", name, type(self).__name__
-                    ),
-                    stacklevel=3,
-                )
-
-        return super()._setResultsName(name, list_all_matches)
-
-    ignoreWhitespace = ignore_whitespace
-    leaveWhitespace = leave_whitespace
-
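-# --- editor's sketch (not in the original source): with the bounded-recursion
-# memoization above, a Forward may be defined left-recursively once
-# enable_left_recursion() (pyparsing 3 API) is turned on:
-#
-#   import pyparsing as pp
-#   pp.ParserElement.enable_left_recursion()
-#   expr = pp.Forward()
-#   expr <<= expr + "+" + pp.Word(pp.nums) | pp.Word(pp.nums)
-#   print(expr.parse_string("1+2+3"))  # -> ['1', '+', '2', '+', '3']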
-
-class TokenConverter(ParseElementEnhance):
-    """
-    Abstract subclass of :class:`ParseExpression`, for converting parsed results.
-    """
-
-    def __init__(self, expr: Union[ParserElement, str], savelist=False):
-        super().__init__(expr)  # , savelist)
-        self.saveAsList = False
-
-
-class Combine(TokenConverter):
-    """Converter to concatenate all matching tokens to a single string.
-    By default, the matching patterns must also be contiguous in the
-    input string; this can be disabled by specifying
-    ``'adjacent=False'`` in the constructor.
-
-    Example::
-
-        real = Word(nums) + '.' + Word(nums)
-        print(real.parse_string('3.1416')) # -> ['3', '.', '1416']
-        # will also erroneously match the following
-        print(real.parse_string('3. 1416')) # -> ['3', '.', '1416']
-
-        real = Combine(Word(nums) + '.' + Word(nums))
-        print(real.parse_string('3.1416')) # -> ['3.1416']
-        # no match when there are internal spaces
-        print(real.parse_string('3. 1416')) # -> Exception: Expected W:(0123...)
-    """
-
-    def __init__(
-        self,
-        expr: ParserElement,
-        join_string: str = "",
-        adjacent: bool = True,
-        *,
-        joinString: OptionalType[str] = None,
-    ):
-        super().__init__(expr)
-        joinString = joinString if joinString is not None else join_string
-        # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
-        if adjacent:
-            self.leave_whitespace()
-        self.adjacent = adjacent
-        self.skipWhitespace = True
-        self.joinString = joinString
-        self.callPreparse = True
-
-    def ignore(self, other) -> ParserElement:
-        if self.adjacent:
-            ParserElement.ignore(self, other)
-        else:
-            super().ignore(other)
-        return self
-
-    def postParse(self, instring, loc, tokenlist):
-        retToks = tokenlist.copy()
-        del retToks[:]
-        retToks += ParseResults(
-            ["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults
-        )
-
-        if self.resultsName and retToks.haskeys():
-            return [retToks]
-        else:
-            return retToks
-
-
-class Group(TokenConverter):
-    """Converter to return the matched tokens as a list - useful for
-    returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions.
-
-    The optional ``aslist`` argument when set to True will return the
-    parsed tokens as a Python list instead of a pyparsing ParseResults.
-
-    Example::
-
-        ident = Word(alphas)
-        num = Word(nums)
-        term = ident | num
-        func = ident + Opt(delimited_list(term))
-        print(func.parse_string("fn a, b, 100"))
-        # -> ['fn', 'a', 'b', '100']
-
-        func = ident + Group(Opt(delimited_list(term)))
-        print(func.parse_string("fn a, b, 100"))
-        # -> ['fn', ['a', 'b', '100']]
-    """
-
-    def __init__(self, expr: ParserElement, aslist: bool = False):
-        super().__init__(expr)
-        self.saveAsList = True
-        self._asPythonList = aslist
-
-    def postParse(self, instring, loc, tokenlist):
-        if self._asPythonList:
-            return ParseResults.List(
-                tokenlist.asList()
-                if isinstance(tokenlist, ParseResults)
-                else list(tokenlist)
-            )
-        else:
-            return [tokenlist]
-
-
-class Dict(TokenConverter):
-    """Converter to return a repetitive expression as a list, but also
-    as a dictionary. Each element can also be referenced using the first
-    token in the expression as its key. Useful for tabular report
-    scraping when the first column can be used as an item key.
-
-    The optional ``asdict`` argument when set to True will return the
-    parsed tokens as a Python dict instead of a pyparsing ParseResults.
-
-    Example::
-
-        data_word = Word(alphas)
-        label = data_word + FollowedBy(':')
-
-        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
-        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
-
-        # print attributes as plain groups
-        print(OneOrMore(attr_expr).parse_string(text).dump())
-
-        # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
-        result = Dict(OneOrMore(Group(attr_expr))).parse_string(text)
-        print(result.dump())
-
-        # access named fields as dict entries, or output as dict
-        print(result['shape'])
-        print(result.as_dict())
-
-    prints::
-
-        ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
-        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
-        - color: 'light blue'
-        - posn: 'upper left'
-        - shape: 'SQUARE'
-        - texture: 'burlap'
-        SQUARE
-        {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
-
-    See more examples at :class:`ParseResults` of accessing fields by results name.
-    """
-
-    def __init__(self, expr: ParserElement, asdict: bool = False):
-        super().__init__(expr)
-        self.saveAsList = True
-        self._asPythonDict = asdict
-
-    def postParse(self, instring, loc, tokenlist):
-        for i, tok in enumerate(tokenlist):
-            if len(tok) == 0:
-                continue
-
-            ikey = tok[0]
-            if isinstance(ikey, int):
-                ikey = str(ikey).strip()
-
-            if len(tok) == 1:
-                tokenlist[ikey] = _ParseResultsWithOffset("", i)
-
-            elif len(tok) == 2 and not isinstance(tok[1], ParseResults):
-                tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i)
-
-            else:
-                try:
-                    dictvalue = tok.copy()  # ParseResults(i)
-                except Exception:
-                    exc = TypeError(
-                        "could not extract dict values from parsed results"
-                        " - Dict expression must contain Grouped expressions"
-                    )
-                    raise exc from None
-
-                del dictvalue[0]
-
-                if len(dictvalue) != 1 or (
-                    isinstance(dictvalue, ParseResults) and dictvalue.haskeys()
-                ):
-                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i)
-                else:
-                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i)
-
-        if self._asPythonDict:
-            return [tokenlist.as_dict()] if self.resultsName else tokenlist.as_dict()
-        else:
-            return [tokenlist] if self.resultsName else tokenlist
-
-
-class Suppress(TokenConverter):
-    """Converter for ignoring the results of a parsed expression.
-
-    Example::
-
-        source = "a, b, c,d"
-        wd = Word(alphas)
-        wd_list1 = wd + ZeroOrMore(',' + wd)
-        print(wd_list1.parse_string(source))
-
-        # often, delimiters that are useful during parsing are just in the
-        # way afterward - use Suppress to keep them out of the parsed output
-        wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
-        print(wd_list2.parse_string(source))
-
-        # Skipped text (using '...') can be suppressed as well
-        source = "lead in START relevant text END trailing text"
-        start_marker = Keyword("START")
-        end_marker = Keyword("END")
-        find_body = Suppress(...) + start_marker + ... + end_marker
-        print(find_body.parse_string(source))
-
-    prints::
-
-        ['a', ',', 'b', ',', 'c', ',', 'd']
-        ['a', 'b', 'c', 'd']
-        ['START', 'relevant text ', 'END']
-
-    (See also :class:`delimited_list`.)
-    """
-
-    def __init__(self, expr: Union[ParserElement, str], savelist: bool = False):
-        if expr is ...:
-            expr = _PendingSkip(NoMatch())
-        super().__init__(expr)
-
-    def __add__(self, other) -> "ParserElement":
-        if isinstance(self.expr, _PendingSkip):
-            return Suppress(SkipTo(other)) + other
-        else:
-            return super().__add__(other)
-
-    def __sub__(self, other) -> "ParserElement":
-        if isinstance(self.expr, _PendingSkip):
-            return Suppress(SkipTo(other)) - other
-        else:
-            return super().__sub__(other)
-
-    def postParse(self, instring, loc, tokenlist):
-        return []
-
-    def suppress(self) -> ParserElement:
-        return self
-
-
-def trace_parse_action(f: ParseAction) -> ParseAction:
-    """Decorator for debugging parse actions.
-
-    When the parse action is called, this decorator will print
-    ``">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)"``.
-    When the parse action completes, the decorator will print
-    ``"<<"`` followed by the returned value, or any exception that the parse action raised.
-
-    Example::
-
-        wd = Word(alphas)
-
-        @trace_parse_action
-        def remove_duplicate_chars(tokens):
-            return ''.join(sorted(set(''.join(tokens))))
-
-        wds = OneOrMore(wd).set_parse_action(remove_duplicate_chars)
-        print(wds.parse_string("slkdjs sld sldd sdlf sdljf"))
-
-    prints::
-
-        >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
-        <<leaving remove_duplicate_chars (ret: 'dfjkls')
-        ['dfjkls']
-    """
-    f = _trim_arity(f)
-
-    def z(*paArgs):
-        thisFunc = f.__name__
-        s, l, t = paArgs[-3:]
-        if len(paArgs) > 3:
-            thisFunc = paArgs[0].__class__.__name__ + "." + thisFunc
-        sys.stderr.write(
-            ">>entering {}(line: {!r}, {}, {!r})\n".format(thisFunc, line(l, s), l, t)
-        )
-        try:
-            ret = f(*paArgs)
-        except Exception as exc:
-            sys.stderr.write("<<leaving {} (exception: {})\n".format(thisFunc, exc))
-            raise
-        sys.stderr.write("<<leaving {} (ret: {!r})\n".format(thisFunc, ret))
-        return ret
-
-    z.__name__ = f.__name__
-    return z
-
-
-# convenience constants for positional expressions
-empty = Empty().set_name("empty")
-line_start = LineStart().set_name("line_start")
-line_end = LineEnd().set_name("line_end")
-string_start = StringStart().set_name("string_start")
-string_end = StringEnd().set_name("string_end")
-
-_escapedPunc = Word(_bslash, r"\[]-*.$+^?()~ ", exact=2).set_parse_action(
-    lambda s, l, t: t[0][1]
-)
-_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").set_parse_action(
-    lambda s, l, t: chr(int(t[0].lstrip(r"\0x"), 16))
-)
-_escapedOctChar = Regex(r"\\0[0-7]+").set_parse_action(
-    lambda s, l, t: chr(int(t[0][1:], 8))
-)
-_singleChar = (
-    _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r"\]", exact=1)
-)
-_charRange = Group(_singleChar + Suppress("-") + _singleChar)
-_reBracketExpr = (
-    Literal("[")
-    + Opt("^").set_results_name("negate")
-    + Group(OneOrMore(_charRange | _singleChar)).set_results_name("body")
-    + "]"
-)
-
-
-def srange(s: str) -> str:
-    r"""Helper to easily define string ranges for use in :class:`Word`
-    construction. Borrows syntax from regexp ``'[]'`` string range
-    definitions::
-
-        srange("[0-9]")   -> "0123456789"
-        srange("[a-z]")   -> "abcdefghijklmnopqrstuvwxyz"
-        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
-
-    The input string must be enclosed in []'s, and the returned string
-    is the expanded character set joined into a single string. The
-    values enclosed in the []'s may be:
-
-    - a single character
-    - an escaped character with a leading backslash (such as ``\-``
-      or ``\]``)
-    - an escaped hex character with a leading ``'\x'``
-      (``\x21``, which is a ``'!'`` character) (``\0x##``
-      is also supported for backwards compatibility)
-    - an escaped octal character with a leading ``'\0'``
-      (``\041``, which is a ``'!'`` character)
-    - a range of any of the above, separated by a dash (``'a-z'``,
-      etc.)
-    - any combination of the above (``'aeiouy'``,
-      ``'a-zA-Z0-9_$'``, etc.)
-    """
-    _expanded = (
-        lambda p: p
-        if not isinstance(p, ParseResults)
-        else "".join(chr(c) for c in range(ord(p[0]), ord(p[1]) + 1))
-    )
-    try:
-        return "".join(_expanded(part) for part in _reBracketExpr.parse_string(s).body)
-    except Exception:
-        return ""
-
-
-def token_map(func, *args) -> ParseAction:
-    """Helper to define a parse action by mapping a function to all
-    elements of a :class:`ParseResults` list. If any additional args are passed,
-    they are forwarded to the given function as additional arguments
-    after the token, as in
-    ``hex_integer = Word(hexnums).set_parse_action(token_map(int, 16))``,
-    which will convert the parsed data to an integer using base 16.
-
-    Example (compare the last example to that in :class:`ParserElement.transform_string`)::
-
-        hex_ints = OneOrMore(Word(hexnums)).set_parse_action(token_map(int, 16))
-        hex_ints.run_tests('''
-            00 11 22 aa FF 0a 0d 1a
-            ''')
-
-        upperword = Word(alphas).set_parse_action(token_map(str.upper))
-        OneOrMore(upperword).run_tests('''
-            my kingdom for a horse
-            ''')
-
-        wd = Word(alphas).set_parse_action(token_map(str.title))
-        OneOrMore(wd).set_parse_action(' '.join).run_tests('''
-            now is the winter of our discontent made glorious summer by this sun of york
-            ''')
-
-    prints::
-
-        00 11 22 aa FF 0a 0d 1a
-        [0, 17, 34, 170, 255, 10, 13, 26]
-
-        my kingdom for a horse
-        ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
-
-        now is the winter of our discontent made glorious summer by this sun of york
-        ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
-    """
-
-    def pa(s, l, t):
-        return [func(tokn, *args) for tokn in t]
-
-    func_name = getattr(func, "__name__", getattr(func, "__class__").__name__)
-    pa.__name__ = func_name
-
-    return pa
-
-
-def autoname_elements() -> None:
-    """
-    Utility to simplify mass-naming of parser elements, for
-    generating railroad diagram with named subdiagrams.
-    """
-    for name, var in sys._getframe().f_back.f_locals.items():
-        if isinstance(var, ParserElement) and not var.customName:
-            var.set_name(name)
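-
-
-# Editor's sketch (not part of the original module): autoname_elements() names
-# every unnamed ParserElement in the *calling* scope after its local variable,
-# which is what gives railroad subdiagrams readable titles. `_autoname_demo`
-# is our illustrative name.
-def _autoname_demo():
-    integer = Word(nums)
-    word = Word(alphas)
-    autoname_elements()
-    return integer.customName, word.customName  # -> ('integer', 'word')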
-
-
-dbl_quoted_string = Combine(
-    Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"'
-).set_name("string enclosed in double quotes")
-
-sgl_quoted_string = Combine(
-    Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'"
-).set_name("string enclosed in single quotes")
-
-quoted_string = Combine(
-    Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"'
-    | Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'"
-).set_name("quotedString using single or double quotes")
-
-unicode_string = Combine("u" + quoted_string.copy()).set_name("unicode string literal")
-
-
-alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
-punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
-
-# build list of built-in expressions, for future reference if a global default value
-# gets updated
-_builtin_exprs = [v for v in vars().values() if isinstance(v, ParserElement)]
-
-# backward compatibility names
-tokenMap = token_map
-conditionAsParseAction = condition_as_parse_action
-nullDebugAction = null_debug_action
-sglQuotedString = sgl_quoted_string
-dblQuotedString = dbl_quoted_string
-quotedString = quoted_string
-unicodeString = unicode_string
-lineStart = line_start
-lineEnd = line_end
-stringStart = string_start
-stringEnd = string_end
-traceParseAction = trace_parse_action
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/diagram/__init__.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/diagram/__init__.py
deleted file mode 100644
index 2d0c587..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/diagram/__init__.py
+++ /dev/null
@@ -1,611 +0,0 @@
-import railroad
-import pyparsing
-from pkg_resources import resource_filename
-from typing import (
-    List,
-    Optional,
-    NamedTuple,
-    Generic,
-    TypeVar,
-    Dict,
-    Callable,
-    Set,
-    Iterable,
-)
-from jinja2 import Template
-from io import StringIO
-import inspect
-
-
-with open(resource_filename(__name__, "template.jinja2"), encoding="utf-8") as fp:
-    template = Template(fp.read())
-
-# Note: ideally this would be a dataclass, but we're supporting Python 3.5+ so we can't do this yet
-NamedDiagram = NamedTuple(
-    "NamedDiagram",
-    [("name", str), ("diagram", Optional[railroad.DiagramItem]), ("index", int)],
-)
-"""
-A simple structure for associating a name with a railroad diagram
-"""
-
-T = TypeVar("T")
-
-
-class EachItem(railroad.Group):
-    """
-    Custom railroad item to compose a:
-    - Group containing a
-      - OneOrMore containing a
-        - Choice of the elements in the Each
-    with the group label indicating that all must be matched
-    """
-
-    all_label = "[ALL]"
-
-    def __init__(self, *items):
-        choice_item = railroad.Choice(len(items) - 1, *items)
-        one_or_more_item = railroad.OneOrMore(item=choice_item)
-        super().__init__(one_or_more_item, label=self.all_label)
-
-
-class AnnotatedItem(railroad.Group):
-    """
-    Simple subclass of Group that creates an annotation label
-    """
-
-    def __init__(self, label: str, item):
-        super().__init__(item=item, label="[{}]".format(label) if label else label)
-
-
-class EditablePartial(Generic[T]):
-    """
-    Acts like a functools.partial, but can be edited. In other words, it represents a type that hasn't yet been
-    constructed.
-    """
-
-    # We need this here because the railroad constructors actually transform the data, so can't be called until the
-    # entire tree is assembled
-
-    def __init__(self, func: Callable[..., T], args: list, kwargs: dict):
-        self.func = func
-        self.args = args
-        self.kwargs = kwargs
-
-    @classmethod
-    def from_call(cls, func: Callable[..., T], *args, **kwargs) -> "EditablePartial[T]":
-        """
-        If you call this function in the same way that you would call the constructor, it will store the arguments
-        as you expect. For example EditablePartial.from_call(Fraction, 1, 3)() == Fraction(1, 3)
-        """
-        return EditablePartial(func=func, args=list(args), kwargs=kwargs)
-
-    @property
-    def name(self):
-        return self.kwargs["name"]
-
-    def __call__(self) -> T:
-        """
-        Evaluate the partial and return the result
-        """
-        args = self.args.copy()
-        kwargs = self.kwargs.copy()
-
-        # This is a helpful hack to allow you to specify varargs parameters (e.g. *args) as keyword args (e.g.
-        # args=['list', 'of', 'things'])
-        arg_spec = inspect.getfullargspec(self.func)
-        if arg_spec.varargs in self.kwargs:
-            args += kwargs.pop(arg_spec.varargs)
-
-        return self.func(*args, **kwargs)
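-
-
-# Editor's sketch (not part of the original module): the point of
-# EditablePartial is that the stored call can still be modified before it is
-# evaluated, unlike functools.partial. `_editable_partial_demo` is our name.
-def _editable_partial_demo():
-    from fractions import Fraction
-
-    part = EditablePartial.from_call(Fraction, 1, 3)
-    part.args[1] = 4  # edit the pending call before constructing anything
-    return part()  # -> Fraction(1, 4)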
-
-
-def railroad_to_html(diagrams: List[NamedDiagram], **kwargs) -> str:
-    """
-    Given a list of NamedDiagram, produce a single HTML string that visualises those diagrams.
-    :param kwargs: kwargs to be passed in to the template
-    """
-    data = []
-    for diagram in diagrams:
-        io = StringIO()
-        diagram.diagram.writeSvg(io.write)
-        title = diagram.name
-        if diagram.index == 0:
-            title += " (root)"
-        data.append({"title": title, "text": "", "svg": io.getvalue()})
-
-    return template.render(diagrams=data, **kwargs)
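-
-
-# Editor's sketch (not part of the original module): the two public entry
-# points compose - to_railroad() builds the diagram tree, railroad_to_html()
-# renders it. `_diagram_demo` is our illustrative name.
-def _diagram_demo(expr: pyparsing.ParserElement) -> str:
-    diagrams = to_railroad(expr, vertical=3, show_results_names=True)
-    return railroad_to_html(diagrams)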
-
-
-def resolve_partial(partial: "EditablePartial[T]") -> T:
-    """
-    Recursively resolves a collection of Partials into whatever type they are
-    """
-    if isinstance(partial, EditablePartial):
-        partial.args = resolve_partial(partial.args)
-        partial.kwargs = resolve_partial(partial.kwargs)
-        return partial()
-    elif isinstance(partial, list):
-        return [resolve_partial(x) for x in partial]
-    elif isinstance(partial, dict):
-        return {key: resolve_partial(x) for key, x in partial.items()}
-    else:
-        return partial
-
-
-def to_railroad(
-    element: pyparsing.ParserElement,
-    diagram_kwargs: Optional[dict] = None,
-    vertical: int = 3,
-    show_results_names: bool = False,
-    show_groups: bool = False,
-) -> List[NamedDiagram]:
-    """
-    Convert a pyparsing element tree into a list of diagrams. This is the recommended entrypoint to diagram
-    creation if you want to access the Railroad tree before it is converted to HTML.
-    :param element: base element of the parser being diagrammed
-    :param diagram_kwargs: kwargs to pass to the Diagram() constructor
-    :param vertical: (optional) int - limit at which number of alternatives should be
-       shown vertically instead of horizontally
-    :param show_results_names: bool to indicate whether results name annotations should be
-       included in the diagram
-    :param show_groups: bool to indicate whether groups should be highlighted with an unlabeled
-       surrounding box
-    """
-    # Convert the whole tree underneath the root
-    lookup = ConverterState(diagram_kwargs=diagram_kwargs or {})
-    _to_diagram_element(
-        element,
-        lookup=lookup,
-        parent=None,
-        vertical=vertical,
-        show_results_names=show_results_names,
-        show_groups=show_groups,
-    )
-
-    root_id = id(element)
-    # Convert the root if it hasn't been already
-    if root_id in lookup:
-        if not element.customName:
-            lookup[root_id].name = ""
-        lookup[root_id].mark_for_extraction(root_id, lookup, force=True)
-
-    # Now that we're finished, we can convert from intermediate structures into Railroad elements
-    diags = list(lookup.diagrams.values())
-    if len(diags) > 1:
-        # collapse out duplicate diags with the same name
-        seen = set()
-        deduped_diags = []
-        for d in diags:
-            # don't extract SkipTo elements, they are uninformative as subdiagrams
-            if d.name == "...":
-                continue
-            if d.name is not None and d.name not in seen:
-                seen.add(d.name)
-                deduped_diags.append(d)
-        resolved = [resolve_partial(partial) for partial in deduped_diags]
-    else:
-        # special case - if just one diagram, always display it, even if
-        # it has no name
-        resolved = [resolve_partial(partial) for partial in diags]
-    return sorted(resolved, key=lambda diag: diag.index)
-
-
-def _should_vertical(
-    specification: int, exprs: Iterable[pyparsing.ParserElement]
-) -> bool:
-    """
-    Returns true if we should return a vertical list of elements
-    """
-    if specification is None:
-        return False
-    else:
-        return len(_visible_exprs(exprs)) >= specification
-
-
-class ElementState:
-    """
-    State recorded for an individual pyparsing Element
-    """
-
-    # Note: this should be a dataclass, but we have to support Python 3.5
-    def __init__(
-        self,
-        element: pyparsing.ParserElement,
-        converted: EditablePartial,
-        parent: EditablePartial,
-        number: int,
-        name: str = None,
-        parent_index: Optional[int] = None,
-    ):
-        #: The pyparsing element that this represents
-        self.element: pyparsing.ParserElement = element
-        #: The name of the element
-        self.name: str = name
-        #: The output Railroad element in an unconverted state
-        self.converted: EditablePartial = converted
-        #: The parent Railroad element, which we store so that we can extract this if it's duplicated
-        self.parent: EditablePartial = parent
-        #: The order in which we found this element, used for sorting diagrams if this is extracted into a diagram
-        self.number: int = number
-        #: The index of this inside its parent
-        self.parent_index: Optional[int] = parent_index
-        #: If true, we should extract this out into a subdiagram
-        self.extract: bool = False
-        #: If true, all of this element's children have been filled out
-        self.complete: bool = False
-
-    def mark_for_extraction(
-        self, el_id: int, state: "ConverterState", name: str = None, force: bool = False
-    ):
-        """
-        Called when this instance has been seen twice, and thus should eventually be extracted into a sub-diagram
-        :param el_id: id of the element
-        :param state: element/diagram state tracker
-        :param name: name to use for this element's text
-        :param force: If true, force extraction now, regardless of the state of this element. Only useful
-        for extracting the root element when we know we're finished
-        """
-        self.extract = True
-
-        # Set the name
-        if not self.name:
-            if name:
-                # Allow forcing a custom name
-                self.name = name
-            elif self.element.customName:
-                self.name = self.element.customName
-            else:
-                self.name = ""
-
-        # Just because this is marked for extraction doesn't mean we can do it yet. We may have to wait for children
-        # to be added
-        # Also, if this is just a string literal etc, don't bother extracting it
-        if force or (self.complete and _worth_extracting(self.element)):
-            state.extract_into_diagram(el_id)
-
-
-class ConverterState:
-    """
-    Stores some state that persists between recursions into the element tree
-    """
-
-    def __init__(self, diagram_kwargs: Optional[dict] = None):
-        #: A dictionary mapping ParserElements to state relating to them
-        self._element_diagram_states: Dict[int, ElementState] = {}
-        #: A dictionary mapping ParserElement IDs to subdiagrams generated from them
-        self.diagrams: Dict[int, EditablePartial[NamedDiagram]] = {}
-        #: The index of the next unnamed element
-        self.unnamed_index: int = 1
-        #: The index of the next element. This is used for sorting
-        self.index: int = 0
-        #: Shared kwargs that are used to customize the construction of diagrams
-        self.diagram_kwargs: dict = diagram_kwargs or {}
-        self.extracted_diagram_names: Set[str] = set()
-
-    def __setitem__(self, key: int, value: ElementState):
-        self._element_diagram_states[key] = value
-
-    def __getitem__(self, key: int) -> ElementState:
-        return self._element_diagram_states[key]
-
-    def __delitem__(self, key: int):
-        del self._element_diagram_states[key]
-
-    def __contains__(self, key: int):
-        return key in self._element_diagram_states
-
-    def generate_unnamed(self) -> int:
-        """
-        Generate a number used in the name of an otherwise unnamed diagram
-        """
-        self.unnamed_index += 1
-        return self.unnamed_index
-
-    def generate_index(self) -> int:
-        """
-        Generate a number used to index a diagram
-        """
-        self.index += 1
-        return self.index
-
-    def extract_into_diagram(self, el_id: int):
-        """
-        Used when we encounter the same token twice in the same tree. When this
-        happens, we replace all instances of that token with a terminal, and
-        create a new subdiagram for the token
-        """
-        position = self[el_id]
-
-        # Replace the original definition of this element with a regular block
-        if position.parent:
-            ret = EditablePartial.from_call(railroad.NonTerminal, text=position.name)
-            if "item" in position.parent.kwargs:
-                position.parent.kwargs["item"] = ret
-            elif "items" in position.parent.kwargs:
-                position.parent.kwargs["items"][position.parent_index] = ret
-
-        # If the element we're extracting is a group, skip to its content but keep the title
-        if position.converted.func == railroad.Group:
-            content = position.converted.kwargs["item"]
-        else:
-            content = position.converted
-
-        self.diagrams[el_id] = EditablePartial.from_call(
-            NamedDiagram,
-            name=position.name,
-            diagram=EditablePartial.from_call(
-                railroad.Diagram, content, **self.diagram_kwargs
-            ),
-            index=position.number,
-        )
-
-        del self[el_id]
-
-
-def _worth_extracting(element: pyparsing.ParserElement) -> bool:
-    """
-    Returns true if this element is worth having its own sub-diagram. Simply put, if any of its
-    children themselves have children, then it's complex enough to extract.
-    """
-    children = element.recurse()
-    return any(child.recurse() for child in children)
-
-
-def _apply_diagram_item_enhancements(fn):
-    """
-    decorator to ensure enhancements to a diagram item (such as results name annotations)
-    get applied on return from _to_diagram_element (we do this since there are several
-    returns in _to_diagram_element)
-    """
-
-    def _inner(
-        element: pyparsing.ParserElement,
-        parent: Optional[EditablePartial],
-        lookup: ConverterState = None,
-        vertical: int = None,
-        index: int = 0,
-        name_hint: str = None,
-        show_results_names: bool = False,
-        show_groups: bool = False,
-    ) -> Optional[EditablePartial]:
-
-        ret = fn(
-            element,
-            parent,
-            lookup,
-            vertical,
-            index,
-            name_hint,
-            show_results_names,
-            show_groups,
-        )
-
-        # apply annotation for results name, if present
-        if show_results_names and ret is not None:
-            element_results_name = element.resultsName
-            if element_results_name:
-                # add "*" to indicate if this is a "list all results" name
-                element_results_name += "" if element.modalResults else "*"
-                ret = EditablePartial.from_call(
-                    railroad.Group, item=ret, label=element_results_name
-                )
-
-        return ret
-
-    return _inner
-
-
-def _visible_exprs(exprs: Iterable[pyparsing.ParserElement]):
-    non_diagramming_exprs = (
-        pyparsing.ParseElementEnhance,
-        pyparsing.PositionToken,
-        pyparsing.And._ErrorStop,
-    )
-    return [
-        e
-        for e in exprs
-        if not (e.customName or e.resultsName or isinstance(e, non_diagramming_exprs))
-    ]
-
-
-@_apply_diagram_item_enhancements
-def _to_diagram_element(
-    element: pyparsing.ParserElement,
-    parent: Optional[EditablePartial],
-    lookup: ConverterState = None,
-    vertical: int = None,
-    index: int = 0,
-    name_hint: str = None,
-    show_results_names: bool = False,
-    show_groups: bool = False,
-) -> Optional[EditablePartial]:
-    """
-    Recursively converts a PyParsing Element to a railroad Element
-    :param lookup: The shared converter state that keeps track of useful things
-    :param index: The index of this element within the parent
-    :param parent: The parent of this element in the output tree
-    :param vertical: Controls at what point we make a list of elements vertical. If this is an integer (the default),
-    it sets the threshold of the number of items before we go vertical. If True, always go vertical; if False, never
-    do so
-    :param name_hint: If provided, this will override the generated name
-    :param show_results_names: bool flag indicating whether to add annotations for results names
-    :param show_groups: bool flag indicating whether to show groups using a bounding box
-    :returns: The converted version of the input element, but as a Partial that hasn't yet been constructed
-    """
-    exprs = element.recurse()
-    name = name_hint or element.customName or element.__class__.__name__
-
-    # Python's id() is used to provide a unique identifier for elements
-    el_id = id(element)
-
-    element_results_name = element.resultsName
-
-    # Here we basically bypass processing certain wrapper elements if they contribute nothing to the diagram
-    if not element.customName:
-        if isinstance(
-            element,
-            (
-                # pyparsing.TokenConverter,
-                # pyparsing.Forward,
-                pyparsing.Located,
-            ),
-        ):
-            # However, if this element has a useful custom name, and its child does not, we can pass it on to the child
-            if exprs:
-                if not exprs[0].customName:
-                    propagated_name = name
-                else:
-                    propagated_name = None
-
-                return _to_diagram_element(
-                    element.expr,
-                    parent=parent,
-                    lookup=lookup,
-                    vertical=vertical,
-                    index=index,
-                    name_hint=propagated_name,
-                    show_results_names=show_results_names,
-                    show_groups=show_groups,
-                )
-
-    # If the element isn't worth extracting, we always treat it as the first time we see it
-    if _worth_extracting(element):
-        if el_id in lookup:
-            # If we've seen this element exactly once before, we are only just now finding out that it's a duplicate,
-            # so we have to extract it into a new diagram.
-            looked_up = lookup[el_id]
-            looked_up.mark_for_extraction(el_id, lookup, name=name_hint)
-            ret = EditablePartial.from_call(railroad.NonTerminal, text=looked_up.name)
-            return ret
-
-        elif el_id in lookup.diagrams:
-            # If we have seen the element at least twice before, and have already extracted it into a subdiagram, we
-            # just put in a marker element that refers to the sub-diagram
-            ret = EditablePartial.from_call(
-                railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"]
-            )
-            return ret
-
-    # Recursively convert child elements
-    # Here we find the most relevant Railroad element for matching pyparsing Element
-    # We use ``items=[]`` here to hold a place for the child elements to go once created
-    if isinstance(element, pyparsing.And):
-        # detect And's created with ``expr*N`` notation - for these use a OneOrMore with a repeat
-        # (all will have the same name, and resultsName)
-        if not exprs:
-            return None
-        if len(set((e.name, e.resultsName) for e in exprs)) == 1:
-            ret = EditablePartial.from_call(
-                railroad.OneOrMore, item="", repeat=str(len(exprs))
-            )
-        elif _should_vertical(vertical, exprs):
-            ret = EditablePartial.from_call(railroad.Stack, items=[])
-        else:
-            ret = EditablePartial.from_call(railroad.Sequence, items=[])
-    elif isinstance(element, (pyparsing.Or, pyparsing.MatchFirst)):
-        if not exprs:
-            return None
-        if _should_vertical(vertical, exprs):
-            ret = EditablePartial.from_call(railroad.Choice, 0, items=[])
-        else:
-            ret = EditablePartial.from_call(railroad.HorizontalChoice, items=[])
-    elif isinstance(element, pyparsing.Each):
-        if not exprs:
-            return None
-        ret = EditablePartial.from_call(EachItem, items=[])
-    elif isinstance(element, pyparsing.NotAny):
-        ret = EditablePartial.from_call(AnnotatedItem, label="NOT", item="")
-    elif isinstance(element, pyparsing.FollowedBy):
-        ret = EditablePartial.from_call(AnnotatedItem, label="LOOKAHEAD", item="")
-    elif isinstance(element, pyparsing.PrecededBy):
-        ret = EditablePartial.from_call(AnnotatedItem, label="LOOKBEHIND", item="")
-    elif isinstance(element, pyparsing.Group):
-        if show_groups:
-            ret = EditablePartial.from_call(AnnotatedItem, label="", item="")
-        else:
-            ret = EditablePartial.from_call(railroad.Group, label="", item="")
-    elif isinstance(element, pyparsing.TokenConverter):
-        ret = EditablePartial.from_call(
-            AnnotatedItem, label=type(element).__name__.lower(), item=""
-        )
-    elif isinstance(element, pyparsing.Opt):
-        ret = EditablePartial.from_call(railroad.Optional, item="")
-    elif isinstance(element, pyparsing.OneOrMore):
-        ret = EditablePartial.from_call(railroad.OneOrMore, item="")
-    elif isinstance(element, pyparsing.ZeroOrMore):
-        ret = EditablePartial.from_call(railroad.ZeroOrMore, item="")
-    elif isinstance(element, pyparsing.Empty) and not element.customName:
-        # Skip unnamed "Empty" elements
-        ret = None
-    elif len(exprs) > 1:
-        ret = EditablePartial.from_call(railroad.Sequence, items=[])
-    elif len(exprs) > 0 and not element_results_name:
-        ret = EditablePartial.from_call(railroad.Group, item="", label=name)
-    else:
-        terminal = EditablePartial.from_call(railroad.Terminal, element.defaultName)
-        ret = terminal
-
-    if ret is None:
-        return
-
-    # Indicate this element's position in the tree so we can extract it if necessary
-    lookup[el_id] = ElementState(
-        element=element,
-        converted=ret,
-        parent=parent,
-        parent_index=index,
-        number=lookup.generate_index(),
-    )
-    if element.customName:
-        lookup[el_id].mark_for_extraction(el_id, lookup, element.customName)
-
-    i = 0
-    for expr in exprs:
-        # Add a placeholder index in case we have to extract the child before we even add it to the parent
-        if "items" in ret.kwargs:
-            ret.kwargs["items"].insert(i, None)
-
-        item = _to_diagram_element(
-            expr,
-            parent=ret,
-            lookup=lookup,
-            vertical=vertical,
-            index=i,
-            show_results_names=show_results_names,
-            show_groups=show_groups,
-        )
-
-        # Some elements don't need to be shown in the diagram
-        if item is not None:
-            if "item" in ret.kwargs:
-                ret.kwargs["item"] = item
-            elif "items" in ret.kwargs:
-                # If we've already extracted the child, don't touch this index, since it's occupied by a nonterminal
-                ret.kwargs["items"][i] = item
-                i += 1
-        elif "items" in ret.kwargs:
-            # If we're supposed to skip this element, remove it from the parent
-            del ret.kwargs["items"][i]
-
-    # If all of this item's children are None, skip this item
-    if ret and (
-        ("items" in ret.kwargs and len(ret.kwargs["items"]) == 0)
-        or ("item" in ret.kwargs and ret.kwargs["item"] is None)
-    ):
-        ret = EditablePartial.from_call(railroad.Terminal, name)
-
-    # Mark this element as "complete", ie it has all of its children
-    if el_id in lookup:
-        lookup[el_id].complete = True
-
-    if el_id in lookup and lookup[el_id].extract and lookup[el_id].complete:
-        lookup.extract_into_diagram(el_id)
-        if ret is not None:
-            ret = EditablePartial.from_call(
-                railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"]
-            )
-
-    return ret
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/diagram/__pycache__/__init__.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/diagram/__pycache__/__init__.cpython-310.pyc
deleted file mode 100644
index 9608642..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/diagram/__pycache__/__init__.cpython-310.pyc and /dev/null differ
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/exceptions.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/exceptions.py
deleted file mode 100644
index e06513e..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/exceptions.py
+++ /dev/null
@@ -1,267 +0,0 @@
-# exceptions.py
-
-import re
-import sys
-from typing import Optional
-
-from .util import col, line, lineno, _collapse_string_to_ranges
-from .unicode import pyparsing_unicode as ppu
-
-
-class ExceptionWordUnicode(ppu.Latin1, ppu.LatinA, ppu.LatinB, ppu.Greek, ppu.Cyrillic):
-    pass
-
-
-_extract_alphanums = _collapse_string_to_ranges(ExceptionWordUnicode.alphanums)
-_exception_word_extractor = re.compile("([" + _extract_alphanums + "]{1,16})|.")
-
-
-class ParseBaseException(Exception):
-    """base exception class for all parsing runtime exceptions"""
-
-    # Performance tuning: we construct a *lot* of these, so keep this
-    # constructor as small and fast as possible
-    def __init__(
-        self,
-        pstr: str,
-        loc: int = 0,
-        msg: Optional[str] = None,
-        elem=None,
-    ):
-        self.loc = loc
-        if msg is None:
-            self.msg = pstr
-            self.pstr = ""
-        else:
-            self.msg = msg
-            self.pstr = pstr
-        self.parser_element = self.parserElement = elem
-        self.args = (pstr, loc, msg)
-
-    @staticmethod
-    def explain_exception(exc, depth=16):
-        """
-        Method to take an exception and translate the Python internal traceback into a list
-        of the pyparsing expressions that caused the exception to be raised.
-
-        Parameters:
-
-        - exc - exception raised during parsing (need not be a ParseException, in support
-          of Python exceptions that might be raised in a parse action)
-        - depth (default=16) - number of levels back in the stack trace to list expression
-          and function names; if None, the full stack trace names will be listed; if 0, only
-          the failing input line, marker, and exception string will be shown
-
-        Returns a multi-line string listing the ParserElements and/or function names in the
-        exception's stack trace.
-        """
-        import inspect
-        from .core import ParserElement
-
-        if depth is None:
-            depth = sys.getrecursionlimit()
-        ret = []
-        if isinstance(exc, ParseBaseException):
-            ret.append(exc.line)
-            ret.append(" " * (exc.column - 1) + "^")
-        ret.append("{}: {}".format(type(exc).__name__, exc))
-
-        if depth > 0:
-            callers = inspect.getinnerframes(exc.__traceback__, context=depth)
-            seen = set()
-            for i, ff in enumerate(callers[-depth:]):
-                frm = ff[0]
-
-                f_self = frm.f_locals.get("self", None)
-                if isinstance(f_self, ParserElement):
-                    if frm.f_code.co_name not in ("parseImpl", "_parseNoCache"):
-                        continue
-                    if id(f_self) in seen:
-                        continue
-                    seen.add(id(f_self))
-
-                    self_type = type(f_self)
-                    ret.append(
-                        "{}.{} - {}".format(
-                            self_type.__module__, self_type.__name__, f_self
-                        )
-                    )
-
-                elif f_self is not None:
-                    self_type = type(f_self)
-                    ret.append("{}.{}".format(self_type.__module__, self_type.__name__))
-
-                else:
-                    code = frm.f_code
-                    if code.co_name in ("wrapper", "<module>"):
-                        continue
-
-                    ret.append("{}".format(code.co_name))
-
-                depth -= 1
-                if not depth:
-                    break
-
-        return "\n".join(ret)
-
-    @classmethod
-    def _from_exception(cls, pe):
-        """
-        internal factory method to simplify creating one type of ParseException
-        from another - avoids having __init__ signature conflicts among subclasses
-        """
-        return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
-
-    @property
-    def line(self) -> str:
-        """
-        Return the line of text where the exception occurred.
-        """
-        return line(self.loc, self.pstr)
-
-    @property
-    def lineno(self) -> int:
-        """
-        Return the 1-based line number of text where the exception occurred.
-        """
-        return lineno(self.loc, self.pstr)
-
-    @property
-    def col(self) -> int:
-        """
-        Return the 1-based column on the line of text where the exception occurred.
-        """
-        return col(self.loc, self.pstr)
-
-    @property
-    def column(self) -> int:
-        """
-        Return the 1-based column on the line of text where the exception occurred.
-        """
-        return col(self.loc, self.pstr)
-
-    def __str__(self) -> str:
-        if self.pstr:
-            if self.loc >= len(self.pstr):
-                foundstr = ", found end of text"
-            else:
-                # pull out next word at error location
-                found_match = _exception_word_extractor.match(self.pstr, self.loc)
-                if found_match is not None:
-                    found = found_match.group(0)
-                else:
-                    found = self.pstr[self.loc : self.loc + 1]
-                foundstr = (", found %r" % found).replace(r"\\", "\\")
-        else:
-            foundstr = ""
-        return "{}{}  (at char {}), (line:{}, col:{})".format(
-            self.msg, foundstr, self.loc, self.lineno, self.column
-        )
-
-    def __repr__(self):
-        return str(self)
-
-    def mark_input_line(self, marker_string: str = None, *, markerString=">!<") -> str:
-        """
-        Extracts the exception line from the input string, and marks
-        the location of the exception with a special symbol.
-        """
-        markerString = marker_string if marker_string is not None else markerString
-        line_str = self.line
-        line_column = self.column - 1
-        if markerString:
-            line_str = "".join(
-                (line_str[:line_column], markerString, line_str[line_column:])
-            )
-        return line_str.strip()
-
-    def explain(self, depth=16) -> str:
-        """
-        Method to translate the Python internal traceback into a list
-        of the pyparsing expressions that caused the exception to be raised.
-
-        Parameters:
-
-        - depth (default=16) - number of levels back in the stack trace to list expression
-          and function names; if None, the full stack trace names will be listed; if 0, only
-          the failing input line, marker, and exception string will be shown
-
-        Returns a multi-line string listing the ParserElements and/or function names in the
-        exception's stack trace.
-
-        Example::
-
-            expr = pp.Word(pp.nums) * 3
-            try:
-                expr.parse_string("123 456 A789")
-            except pp.ParseException as pe:
-                print(pe.explain(depth=0))
-
-        prints::
-
-            123 456 A789
-                    ^
-            ParseException: Expected W:(0-9), found 'A'  (at char 8), (line:1, col:9)
-
-        Note: the diagnostic output will include string representations of the expressions
-        that failed to parse. These representations will be more helpful if you use `set_name` to
-        give identifiable names to your expressions. Otherwise they will use the default string
-        forms, which may be cryptic to read.
-
-        Note: pyparsing's default truncation of exception tracebacks may also truncate the
-        stack of expressions that are displayed in the ``explain`` output. To get the full listing
-        of parser expressions, you may have to set ``ParserElement.verbose_stacktrace = True``
-        """
-        return self.explain_exception(self, depth)
-
-    markInputline = mark_input_line
-
-
-class ParseException(ParseBaseException):
-    """
-    Exception thrown when a parse expression doesn't match the input string
-
-    Example::
-
-        try:
-            Word(nums).set_name("integer").parse_string("ABC")
-        except ParseException as pe:
-            print(pe)
-            print("column: {}".format(pe.column))
-
-    prints::
-
-        Expected integer (at char 0), (line:1, col:1)
-        column: 1
-
-    """
-
-
-class ParseFatalException(ParseBaseException):
-    """
-    User-throwable exception thrown when inconsistent parse content
-    is found; stops all parsing immediately
-    """
-
-
-class ParseSyntaxException(ParseFatalException):
-    """
-    Just like :class:`ParseFatalException`, but thrown internally
-    when an :class:`ErrorStop<And._ErrorStop>` ('-' operator) indicates
-    that parsing is to stop immediately because an unbacktrackable
-    syntax error has been found.
-    """
-
-
-class RecursiveGrammarException(Exception):
-    """
-    Exception thrown by :class:`ParserElement.validate` if the
-    grammar could be left-recursive; parser may need to enable
-    left recursion using :class:`ParserElement.enable_left_recursion<ParserElement.enable_left_recursion>`
-    """
-
-    def __init__(self, parseElementList):
-        self.parseElementTrace = parseElementList
-
-    def __str__(self) -> str:
-        return "RecursiveGrammarException: {}".format(self.parseElementTrace)
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/helpers.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/helpers.py
deleted file mode 100644
index be8a365..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/helpers.py
+++ /dev/null
@@ -1,1083 +0,0 @@
-# helpers.py
-import html.entities
-import re
-
-from . import __diag__
-from .core import *
-from .util import _bslash, _flatten, _escape_regex_range_chars
-
-
-#
-# global helpers
-#
-def delimited_list(
-    expr: Union[str, ParserElement],
-    delim: Union[str, ParserElement] = ",",
-    combine: bool = False,
-    min: OptionalType[int] = None,
-    max: OptionalType[int] = None,
-    *,
-    allow_trailing_delim: bool = False,
-) -> ParserElement:
-    """Helper to define a delimited list of expressions - the delimiter
-    defaults to ','. By default, the list elements and delimiters can
-    have intervening whitespace and comments, but this can be
-    overridden by passing ``combine=True`` in the constructor. If
-    ``combine`` is set to ``True``, the matching tokens are
-    returned as a single token string, with the delimiters included;
-    otherwise, the matching tokens are returned as a list of tokens,
-    with the delimiters suppressed.
-
-    If ``allow_trailing_delim`` is set to True, then the list may end with
-    a delimiter.
-
-    Example::
-
-        delimited_list(Word(alphas)).parse_string("aa,bb,cc") # -> ['aa', 'bb', 'cc']
-        delimited_list(Word(hexnums), delim=':', combine=True).parse_string("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
-    """
-    if isinstance(expr, str_type):
-        expr = ParserElement._literalStringClass(expr)
-
-    dlName = "{expr} [{delim} {expr}]...{end}".format(
-        expr=str(expr.copy().streamline()),
-        delim=str(delim),
-        end=" [{}]".format(str(delim)) if allow_trailing_delim else "",
-    )
-
-    if not combine:
-        delim = Suppress(delim)
-
-    if min is not None:
-        if min < 1:
-            raise ValueError("min must be greater than 0")
-        min -= 1
-    if max is not None:
-        if min is not None and max <= min:
-            raise ValueError("max must be greater than, or equal to min")
-        max -= 1
-    delimited_list_expr = expr + (delim + expr)[min, max]
-
-    if allow_trailing_delim:
-        delimited_list_expr += Opt(delim)
-
-    if combine:
-        return Combine(delimited_list_expr).set_name(dlName)
-    else:
-        return delimited_list_expr.set_name(dlName)
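-
-
-# Editor's sketch (not part of the original module): the min/max arguments,
-# which the docstring above does not describe, bound how many list elements
-# must be matched. `_delimited_list_demo` is our illustrative name.
-def _delimited_list_demo():
-    two_to_three = delimited_list(Word(nums), min=2, max=3)
-    return two_to_three.parse_string("1,2,3").as_list()  # -> ['1', '2', '3']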
-
-
-def counted_array(
-    expr: ParserElement,
-    int_expr: OptionalType[ParserElement] = None,
-    *,
-    intExpr: OptionalType[ParserElement] = None,
-) -> ParserElement:
-    """Helper to define a counted list of expressions.
-
-    This helper defines a pattern of the form::
-
-        integer expr expr expr...
-
-    where the leading integer tells how many expr expressions follow.
-    The matched tokens returns the array of expr tokens as a list - the
-    leading count token is suppressed.
-
-    If ``int_expr`` is specified, it should be a pyparsing expression
-    that produces an integer value.
-
-    Example::
-
-        counted_array(Word(alphas)).parse_string('2 ab cd ef')  # -> ['ab', 'cd']
-
-        # in this parser, the leading integer value is given in binary,
-        # '10' indicating that 2 values are in the array
-        binary_constant = Word('01').set_parse_action(lambda t: int(t[0], 2))
-        counted_array(Word(alphas), int_expr=binary_constant).parse_string('10 ab cd ef')  # -> ['ab', 'cd']
-
-        # if other fields must be parsed after the count but before the
-        # list items, give the fields results names and they will
-        # be preserved in the returned ParseResults:
-        count_with_metadata = integer + Word(alphas)("type")
-        typed_array = counted_array(Word(alphanums), int_expr=count_with_metadata)("items")
-        result = typed_array.parse_string("3 bool True True False")
-        print(result.dump())
-
-        # prints
-        # ['True', 'True', 'False']
-        # - items: ['True', 'True', 'False']
-        # - type: 'bool'
-    """
-    intExpr = intExpr or int_expr
-    array_expr = Forward()
-
-    def count_field_parse_action(s, l, t):
-        nonlocal array_expr
-        n = t[0]
-        array_expr <<= (expr * n) if n else Empty()
-        # clear list contents, but keep any named results
-        del t[:]
-
-    if intExpr is None:
-        intExpr = Word(nums).set_parse_action(lambda t: int(t[0]))
-    else:
-        intExpr = intExpr.copy()
-    intExpr.set_name("arrayLen")
-    intExpr.add_parse_action(count_field_parse_action, call_during_try=True)
-    return (intExpr + array_expr).set_name("(len) " + str(expr) + "...")
-
-
-def match_previous_literal(expr: ParserElement) -> ParserElement:
-    """Helper to define an expression that is indirectly defined from
-    the tokens matched in a previous expression, that is, it looks for
-    a 'repeat' of a previous expression.  For example::
-
-        first = Word(nums)
-        second = match_previous_literal(first)
-        match_expr = first + ":" + second
-
-    will match ``"1:1"``, but not ``"1:2"``.  Because this
-    matches a previous literal, it will also match the leading
-    ``"1:1"`` in ``"1:10"``. If this is not desired, use
-    :class:`match_previous_expr`. Do *not* use with packrat parsing
-    enabled.
-    """
-    rep = Forward()
-
-    def copy_token_to_repeater(s, l, t):
-        if t:
-            if len(t) == 1:
-                rep << t[0]
-            else:
-                # flatten t tokens
-                tflat = _flatten(t.as_list())
-                rep << And(Literal(tt) for tt in tflat)
-        else:
-            rep << Empty()
-
-    expr.add_parse_action(copy_token_to_repeater, callDuringTry=True)
-    rep.set_name("(prev) " + str(expr))
-    return rep
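-
-
-# Editor's sketch (not part of the original module): the second field below
-# only matches a literal repeat of whatever the first field matched.
-# `_match_previous_literal_demo` is our illustrative name.
-def _match_previous_literal_demo():
-    first = Word(nums)
-    pair = first + ":" + match_previous_literal(first)
-    return pair.parse_string("12:12").as_list()  # -> ['12', ':', '12']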
-
-
-def match_previous_expr(expr: ParserElement) -> ParserElement:
-    """Helper to define an expression that is indirectly defined from
-    the tokens matched in a previous expression, that is, it looks for
-    a 'repeat' of a previous expression.  For example::
-
-        first = Word(nums)
-        second = match_previous_expr(first)
-        match_expr = first + ":" + second
-
-    will match ``"1:1"``, but not ``"1:2"``.  Because this
-    matches by expressions, it will *not* match the leading ``"1:1"``
-    in ``"1:10"``; the expressions are evaluated first, and then
-    compared, so ``"1"`` is compared with ``"10"``. Do *not* use
-    with packrat parsing enabled.
-    """
-    rep = Forward()
-    e2 = expr.copy()
-    rep <<= e2
-
-    def copy_token_to_repeater(s, l, t):
-        matchTokens = _flatten(t.as_list())
-
-        def must_match_these_tokens(s, l, t):
-            theseTokens = _flatten(t.as_list())
-            if theseTokens != matchTokens:
-                raise ParseException(
-                    s, l, "Expected {}, found{}".format(matchTokens, theseTokens)
-                )
-
-        rep.set_parse_action(must_match_these_tokens, callDuringTry=True)
-
-    expr.add_parse_action(copy_token_to_repeater, callDuringTry=True)
-    rep.set_name("(prev) " + str(expr))
-    return rep
-
-
-def one_of(
-    strs: Union[IterableType[str], str],
-    caseless: bool = False,
-    use_regex: bool = True,
-    as_keyword: bool = False,
-    *,
-    useRegex: bool = True,
-    asKeyword: bool = False,
-) -> ParserElement:
-    """Helper to quickly define a set of alternative :class:`Literal` s,
-    and makes sure to do longest-first testing when there is a conflict,
-    regardless of the input order, but returns
-    a :class:`MatchFirst` for best performance.
-
-    Parameters:
-
-    - ``strs`` - a string of space-delimited literals, or a collection of
-      string literals
-    - ``caseless`` - treat all literals as caseless - (default= ``False``)
-    - ``use_regex`` - as an optimization, will
-      generate a :class:`Regex` object; otherwise, will generate
-      a :class:`MatchFirst` object (if ``caseless=True`` or ``asKeyword=True``, or if
-      creating a :class:`Regex` raises an exception) - (default= ``True``)
-    - ``as_keyword`` - enforce :class:`Keyword`-style matching on the
-      generated expressions - (default= ``False``)
-    - ``asKeyword`` and ``useRegex`` are retained for pre-PEP8 compatibility,
-      but will be removed in a future release
-
-    Example::
-
-        comp_oper = one_of("< = > <= >= !=")
-        var = Word(alphas)
-        number = Word(nums)
-        term = var | number
-        comparison_expr = term + comp_oper + term
-        print(comparison_expr.search_string("B = 12  AA=23 B<=AA AA>12"))
-
-    prints::
-
-        [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
-    """
-    asKeyword = asKeyword or as_keyword
-    useRegex = useRegex and use_regex
-
-    if (
-        isinstance(caseless, str_type)
-        and __diag__.warn_on_multiple_string_args_to_oneof
-    ):
-        warnings.warn(
-            "More than one string argument passed to one_of, pass"
-            " choices as a list or space-delimited string",
-            stacklevel=2,
-        )
-
-    if caseless:
-        isequal = lambda a, b: a.upper() == b.upper()
-        masks = lambda a, b: b.upper().startswith(a.upper())
-        parseElementClass = CaselessKeyword if asKeyword else CaselessLiteral
-    else:
-        isequal = lambda a, b: a == b
-        masks = lambda a, b: b.startswith(a)
-        parseElementClass = Keyword if asKeyword else Literal
-
-    symbols: List[str] = []
-    if isinstance(strs, str_type):
-        symbols = strs.split()
-    elif isinstance(strs, Iterable):
-        symbols = list(strs)
-    else:
-        raise TypeError("Invalid argument to one_of, expected string or iterable")
-    if not symbols:
-        return NoMatch()
-
-    # reorder given symbols to take care to avoid masking longer choices with shorter ones
-    # (but only if the given symbols are not just single characters)
-    if any(len(sym) > 1 for sym in symbols):
-        i = 0
-        while i < len(symbols) - 1:
-            cur = symbols[i]
-            for j, other in enumerate(symbols[i + 1 :]):
-                if isequal(other, cur):
-                    del symbols[i + j + 1]
-                    break
-                elif masks(cur, other):
-                    del symbols[i + j + 1]
-                    symbols.insert(i, other)
-                    break
-            else:
-                i += 1
-
-    if useRegex:
-        re_flags: int = re.IGNORECASE if caseless else 0
-
-        try:
-            if all(len(sym) == 1 for sym in symbols):
-                # symbols are just single characters, create range regex pattern
-                patt = "[{}]".format(
-                    "".join(_escape_regex_range_chars(sym) for sym in symbols)
-                )
-            else:
-                patt = "|".join(re.escape(sym) for sym in symbols)
-
-            # wrap with \b word break markers if defining as keywords
-            if asKeyword:
-                patt = r"\b(?:{})\b".format(patt)
-
-            ret = Regex(patt, flags=re_flags).set_name(" | ".join(symbols))
-
-            if caseless:
-                # add parse action to return symbols as specified, not in random
-                # casing as found in input string
-                symbol_map = {sym.lower(): sym for sym in symbols}
-                ret.add_parse_action(lambda s, l, t: symbol_map[t[0].lower()])
-
-            return ret
-
-        except re.error:
-            warnings.warn(
-                "Exception creating Regex for one_of, building MatchFirst", stacklevel=2
-            )
-
-    # last resort, just use MatchFirst
-    return MatchFirst(parseElementClass(sym) for sym in symbols).set_name(
-        " | ".join(symbols)
-    )
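-
-
-# Editor's sketch (not part of the original module): one_of() reorders the
-# given literals so longer alternatives win over their prefixes, whatever the
-# input order. `_one_of_masking_demo` is our illustrative name.
-def _one_of_masking_demo():
-    op = one_of("> >= < <= = !=")
-    return op.parse_string(">=").as_list()  # -> ['>='], not ['>']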
-
-
-def dict_of(key: ParserElement, value: ParserElement) -> ParserElement:
-    """Helper to easily and clearly define a dictionary by specifying
-    the respective patterns for the key and value.  Takes care of
-    defining the :class:`Dict`, :class:`ZeroOrMore`, and
-    :class:`Group` tokens in the proper order.  The key pattern
-    can include delimiting markers or punctuation, as long as they are
-    suppressed, thereby leaving the significant key text.  The value
-    pattern can include named results, so that the :class:`Dict` results
-    can include named token fields.
-
-    Example::
-
-        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
-        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
-        print(OneOrMore(attr_expr).parse_string(text).dump())
-
-        attr_label = label
-        attr_value = Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)
-
-        # similar to Dict, but simpler call format
-        result = dict_of(attr_label, attr_value).parse_string(text)
-        print(result.dump())
-        print(result['shape'])
-        print(result.shape)  # object attribute access works too
-        print(result.as_dict())
-
-    prints::
-
-        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
-        - color: 'light blue'
-        - posn: 'upper left'
-        - shape: 'SQUARE'
-        - texture: 'burlap'
-        SQUARE
-        SQUARE
-        {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
-    """
-    return Dict(OneOrMore(Group(key + value)))
-
-
-def original_text_for(
-    expr: ParserElement, as_string: bool = True, *, asString: bool = True
-) -> ParserElement:
-    """Helper to return the original, untokenized text for a given
-    expression.  Useful to restore the parsed fields of an HTML start
-    tag into the raw tag text itself, or to revert separate tokens with
-    intervening whitespace back to the original matching input text. By
-    default, returns a string containing the original parsed text.
-
-    If the optional ``as_string`` argument is passed as
-    ``False``, then the return value is
-    a :class:`ParseResults` containing any results names that
-    were originally matched, and a single token containing the original
-    matched text from the input string.  So if the expression passed to
-    :class:`original_text_for` contains expressions with defined
-    results names, you must set ``as_string`` to ``False`` if you
-    want to preserve those results name values.
-
-    The ``asString`` pre-PEP8 argument is retained for compatibility,
-    but will be removed in a future release.
-
-    Example::
-
-        src = "this is test <b> bold <i>text</i> </b> normal text "
-        for tag in ("b", "i"):
-            opener, closer = make_html_tags(tag)
-            patt = original_text_for(opener + SkipTo(closer) + closer)
-            print(patt.search_string(src)[0])
-
-    prints::
-
-        ['<b> bold <i>text</i> </b>']
-        ['<i>text</i>']
-    """
-    asString = asString and as_string
-
-    locMarker = Empty().set_parse_action(lambda s, loc, t: loc)
-    endlocMarker = locMarker.copy()
-    endlocMarker.callPreparse = False
-    matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
-    if asString:
-        extractText = lambda s, l, t: s[t._original_start : t._original_end]
-    else:
-
-        def extractText(s, l, t):
-            t[:] = [s[t.pop("_original_start") : t.pop("_original_end")]]
-
-    matchExpr.set_parse_action(extractText)
-    matchExpr.ignoreExprs = expr.ignoreExprs
-    matchExpr.suppress_warning(Diagnostics.warn_ungrouped_named_tokens_in_collection)
-    return matchExpr
-
-
-def ungroup(expr: ParserElement) -> ParserElement:
-    """Helper to undo pyparsing's default grouping of And expressions,
-    even if all but one are non-empty.
-    """
-    return TokenConverter(expr).add_parse_action(lambda t: t[0])
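-
-
-# Editor's sketch (not part of the original module): ungroup() strips exactly
-# one level of Group-ing from the results. `_ungroup_demo` is our name.
-def _ungroup_demo():
-    grouped = Group(Word(alphas) + Word(nums))
-    return (
-        grouped.parse_string("abc 123").as_list(),  # -> [['abc', '123']]
-        ungroup(grouped).parse_string("abc 123").as_list(),  # -> ['abc', '123']
-    )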
-
-
-def locatedExpr(expr: ParserElement) -> ParserElement:
-    """
-    (DEPRECATED - future code should use the Located class)
-    Helper to decorate a returned token with its starting and ending
-    locations in the input string.
-
-    This helper adds the following results names:
-
-    - ``locn_start`` - location where matched expression begins
-    - ``locn_end`` - location where matched expression ends
-    - ``value`` - the actual parsed results
-
-    Be careful if the input text contains ``<TAB>`` characters; you
-    may want to call :class:`ParserElement.parseWithTabs`.
-
-    Example::
-
-        wd = Word(alphas)
-        for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
-            print(match)
-
-    prints::
-
-        [[0, 'ljsdf', 5]]
-        [[8, 'lksdjjf', 15]]
-        [[18, 'lkkjj', 23]]
-    """
-    locator = Empty().set_parse_action(lambda ss, ll, tt: ll)
-    return Group(
-        locator("locn_start")
-        + expr("value")
-        + locator.copy().leaveWhitespace()("locn_end")
-    )
-
-
-def nested_expr(
-    opener: Union[str, ParserElement] = "(",
-    closer: Union[str, ParserElement] = ")",
-    content: OptionalType[ParserElement] = None,
-    ignore_expr: ParserElement = quoted_string(),
-    *,
-    ignoreExpr: ParserElement = quoted_string(),
-) -> ParserElement:
-    """Helper method for defining nested lists enclosed in opening and
-    closing delimiters (``"("`` and ``")"`` are the default).
-
-    Parameters:
-    - ``opener`` - opening character for a nested list
-      (default= ``"("``); can also be a pyparsing expression
-    - ``closer`` - closing character for a nested list
-      (default= ``")"``); can also be a pyparsing expression
-    - ``content`` - expression for items within the nested lists
-      (default= ``None``)
-    - ``ignore_expr`` - expression for ignoring opening and closing delimiters
-      (default= :class:`quoted_string`)
-    - ``ignoreExpr`` - this pre-PEP8 argument is retained for compatibility
-      but will be removed in a future release
-
-    If an expression is not provided for the content argument, the
-    nested expression will capture all whitespace-delimited content
-    between delimiters as a list of separate values.
-
-    Use the ``ignore_expr`` argument to define expressions that may
-    contain opening or closing characters that should not be treated as
-    opening or closing characters for nesting, such as quoted_string or
-    a comment expression.  Specify multiple expressions using an
-    :class:`Or` or :class:`MatchFirst`. The default is
-    :class:`quoted_string`, but if no expressions are to be ignored, then
-    pass ``None`` for this argument.
-
-    Example::
-
-        data_type = one_of("void int short long char float double")
-        decl_data_type = Combine(data_type + Opt(Word('*')))
-        ident = Word(alphas+'_', alphanums+'_')
-        number = pyparsing_common.number
-        arg = Group(decl_data_type + ident)
-        LPAR, RPAR = map(Suppress, "()")
-
-        code_body = nested_expr('{', '}', ignore_expr=(quoted_string | c_style_comment))
-
-        c_function = (decl_data_type("type")
-                      + ident("name")
-                      + LPAR + Opt(delimited_list(arg), [])("args") + RPAR
-                      + code_body("body"))
-        c_function.ignore(c_style_comment)
-
-        source_code = '''
-            int is_odd(int x) {
-                return (x%2);
-            }
-
-            int dec_to_hex(char hchar) {
-                if (hchar >= '0' && hchar <= '9') {
-                    return (ord(hchar)-ord('0'));
-                } else {
-                    return (10+ord(hchar)-ord('A'));
-                }
-            }
-        '''
-        for func in c_function.search_string(source_code):
-            print("%(name)s (%(type)s) args: %(args)s" % func)
-
-
-    prints::
-
-        is_odd (int) args: [['int', 'x']]
-        dec_to_hex (int) args: [['char', 'hchar']]
-    """
-    if ignoreExpr != ignore_expr:
-        ignoreExpr = ignore_expr if ignoreExpr == quoted_string() else ignoreExpr
-    if opener == closer:
-        raise ValueError("opening and closing strings cannot be the same")
-    if content is None:
-        if isinstance(opener, str_type) and isinstance(closer, str_type):
-            if len(opener) == 1 and len(closer) == 1:
-                if ignoreExpr is not None:
-                    content = Combine(
-                        OneOrMore(
-                            ~ignoreExpr
-                            + CharsNotIn(
-                                opener + closer + ParserElement.DEFAULT_WHITE_CHARS,
-                                exact=1,
-                            )
-                        )
-                    ).set_parse_action(lambda t: t[0].strip())
-                else:
-                    content = empty.copy() + CharsNotIn(
-                        opener + closer + ParserElement.DEFAULT_WHITE_CHARS
-                    ).set_parse_action(lambda t: t[0].strip())
-            else:
-                if ignoreExpr is not None:
-                    content = Combine(
-                        OneOrMore(
-                            ~ignoreExpr
-                            + ~Literal(opener)
-                            + ~Literal(closer)
-                            + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
-                        )
-                    ).set_parse_action(lambda t: t[0].strip())
-                else:
-                    content = Combine(
-                        OneOrMore(
-                            ~Literal(opener)
-                            + ~Literal(closer)
-                            + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
-                        )
-                    ).set_parse_action(lambda t: t[0].strip())
-        else:
-            raise ValueError(
-                "opening and closing arguments must be strings if no content expression is given"
-            )
-    ret = Forward()
-    if ignoreExpr is not None:
-        ret <<= Group(
-            Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer)
-        )
-    else:
-        ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer))
-    ret.set_name("nested %s%s expression" % (opener, closer))
-    return ret
-
-
-def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">")):
-    """Internal helper to construct opening and closing tag expressions, given a tag name"""
-    if isinstance(tagStr, str_type):
-        resname = tagStr
-        tagStr = Keyword(tagStr, caseless=not xml)
-    else:
-        resname = tagStr.name
-
-    tagAttrName = Word(alphas, alphanums + "_-:")
-    if xml:
-        tagAttrValue = dbl_quoted_string.copy().set_parse_action(remove_quotes)
-        openTag = (
-            suppress_LT
-            + tagStr("tag")
-            + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue)))
-            + Opt("/", default=[False])("empty").set_parse_action(
-                lambda s, l, t: t[0] == "/"
-            )
-            + suppress_GT
-        )
-    else:
-        tagAttrValue = quoted_string.copy().set_parse_action(remove_quotes) | Word(
-            printables, exclude_chars=">"
-        )
-        openTag = (
-            suppress_LT
-            + tagStr("tag")
-            + Dict(
-                ZeroOrMore(
-                    Group(
-                        tagAttrName.set_parse_action(lambda t: t[0].lower())
-                        + Opt(Suppress("=") + tagAttrValue)
-                    )
-                )
-            )
-            + Opt("/", default=[False])("empty").set_parse_action(
-                lambda s, l, t: t[0] == "/"
-            )
-            + suppress_GT
-        )
-    closeTag = Combine(Literal("</") + tagStr + ">", adjacent=False)
-
-    openTag.set_name("<%s>" % resname)
-    # add start<tagname> results name in parse action now that ungrouped names are not reported at two levels
-    openTag.add_parse_action(
-        lambda t: t.__setitem__(
-            "start" + "".join(resname.replace(":", " ").title().split()), t.copy()
-        )
-    )
-    closeTag = closeTag(
-        "end" + "".join(resname.replace(":", " ").title().split())
-    ).set_name("</%s>" % resname)
-    openTag.tag = resname
-    closeTag.tag = resname
-    openTag.tag_body = SkipTo(closeTag())
-    return openTag, closeTag
-
-
-def make_html_tags(
-    tag_str: Union[str, ParserElement]
-) -> Tuple[ParserElement, ParserElement]:
-    """Helper to construct opening and closing tag expressions for HTML,
-    given a tag name. Matches tags in either upper or lower case,
-    attributes with namespaces and with quoted or unquoted values.
-
-    Example::
-
-        text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
-        # make_html_tags returns pyparsing expressions for the opening and
-        # closing tags as a 2-tuple
-        a, a_end = make_html_tags("A")
-        link_expr = a + SkipTo(a_end)("link_text") + a_end
-
-        for link in link_expr.search_string(text):
-            # attributes in the <A> tag (like "href" shown here) are
-            # also accessible as named results
-            print(link.link_text, '->', link.href)
-
-    prints::
-
-        pyparsing -> https://github.com/pyparsing/pyparsing/wiki
-    """
-    return _makeTags(tag_str, False)
-
-
-def make_xml_tags(
-    tag_str: Union[str, ParserElement]
-) -> Tuple[ParserElement, ParserElement]:
-    """Helper to construct opening and closing tag expressions for XML,
-    given a tag name. Matches tags only in the given upper/lower case.
-
-    Example: similar to :class:`make_html_tags`
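-
-    For instance (an illustrative sketch; note that, unlike the HTML helper,
-    the tag must match case exactly)::
-
-        text = '<item id="1">data</item>'
-        item, item_end = make_xml_tags("item")
-        expr = item + SkipTo(item_end)("body") + item_end
-        print(expr.parse_string(text).body)  # -> 'data'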
-    """
-    return _makeTags(tag_str, True)
-
-
-any_open_tag, any_close_tag = make_html_tags(
-    Word(alphas, alphanums + "_:").set_name("any tag")
-)
-
-_htmlEntityMap = {k.rstrip(";"): v for k, v in html.entities.html5.items()}
-common_html_entity = Regex("&(?P<entity>" + "|".join(_htmlEntityMap) + ");").set_name(
-    "common HTML entity"
-)
-
-
-def replace_html_entity(t):
-    """Helper parser action to replace common HTML entities with their special characters"""
-    return _htmlEntityMap.get(t.entity)
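-
-# A typical use, sketched here for illustration: attach replace_html_entity as
-# a parse action on common_html_entity, then transform a string containing
-# entity references:
-#
-#     converter = common_html_entity.copy().set_parse_action(replace_html_entity)
-#     print(converter.transform_string("Dodge &amp; Cox &lt;info&gt;"))
-#     # -> Dodge & Cox <info>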
-
-
-class OpAssoc(Enum):
-    LEFT = 1
-    RIGHT = 2
-
-
-InfixNotationOperatorArgType = Union[
-    ParserElement, str, Tuple[Union[ParserElement, str], Union[ParserElement, str]]
-]
-InfixNotationOperatorSpec = Union[
-    Tuple[
-        InfixNotationOperatorArgType,
-        int,
-        OpAssoc,
-        OptionalType[ParseAction],
-    ],
-    Tuple[
-        InfixNotationOperatorArgType,
-        int,
-        OpAssoc,
-    ],
-]
-
-
-def infix_notation(
-    base_expr: ParserElement,
-    op_list: List[InfixNotationOperatorSpec],
-    lpar: Union[str, ParserElement] = Suppress("("),
-    rpar: Union[str, ParserElement] = Suppress(")"),
-) -> ParserElement:
-    """Helper method for constructing grammars of expressions made up of
-    operators working in a precedence hierarchy.  Operators may be unary
-    or binary, left- or right-associative.  Parse actions can also be
-    attached to operator expressions. The generated parser will also
-    recognize the use of parentheses to override operator precedences
-    (see example below).
-
-    Note: if you define a deep operator list, you may see performance
-    issues when using infix_notation. See
-    :class:`ParserElement.enable_packrat` for a mechanism to potentially
-    improve your parser performance.
-
-    Parameters:
-    - ``base_expr`` - expression representing the most basic operand to
-      be used in the expression
-    - ``op_list`` - list of tuples, one for each operator precedence level
-      in the expression grammar; each tuple is of the form ``(op_expr,
-      num_operands, right_left_assoc, (optional)parse_action)``, where:
-
-      - ``op_expr`` is the pyparsing expression for the operator; may also
-        be a string, which will be converted to a Literal; if ``num_operands``
-        is 3, ``op_expr`` is a tuple of two expressions, for the two
-        operators separating the 3 terms
-      - ``num_operands`` is the number of terms for this operator (must be 1,
-        2, or 3)
-      - ``right_left_assoc`` is the indicator whether the operator is right
-        or left associative, using the pyparsing-defined constants
-        ``OpAssoc.RIGHT`` and ``OpAssoc.LEFT``.
-      - ``parse_action`` is the parse action to be associated with
-        expressions matching this operator expression (the parse action
-        tuple member may be omitted); if the parse action is passed
-        a tuple or list of functions, this is equivalent to calling
-        ``set_parse_action(*fn)``
-        (:class:`ParserElement.set_parse_action`)
-    - ``lpar`` - expression for matching left-parentheses; if passed as a
-      str, then will be parsed as Suppress(lpar). If lpar is passed as
-      an expression (such as ``Literal('(')``), then it will be kept in
-      the parsed results, and grouped with them. (default= ``Suppress('(')``)
-    - ``rpar`` - expression for matching right-parentheses; if passed as a
-      str, then will be parsed as Suppress(rpar). If rpar is passed as
-      an expression (such as ``Literal(')')``), then it will be kept in
-      the parsed results, and grouped with them. (default= ``Suppress(')')``)
-
-    Example::
-
-        # simple example of four-function arithmetic with ints and
-        # variable names
-        integer = pyparsing_common.signed_integer
-        varname = pyparsing_common.identifier
-
-        arith_expr = infix_notation(integer | varname,
-            [
-            ('-', 1, OpAssoc.RIGHT),
-            (one_of('* /'), 2, OpAssoc.LEFT),
-            (one_of('+ -'), 2, OpAssoc.LEFT),
-            ])
-
-        arith_expr.run_tests('''
-            5+3*6
-            (5+3)*6
-            -2--11
-            ''', full_dump=False)
-
-    prints::
-
-        5+3*6
-        [[5, '+', [3, '*', 6]]]
-
-        (5+3)*6
-        [[[5, '+', 3], '*', 6]]
-
-        -2--11
-        [[['-', 2], '-', ['-', 11]]]
-    """
-    # captive version of FollowedBy that does not do parse actions or capture results names
-    class _FB(FollowedBy):
-        def parseImpl(self, instring, loc, doActions=True):
-            self.expr.try_parse(instring, loc)
-            return loc, []
-
-    _FB.__name__ = "FollowedBy>"
-
-    ret = Forward()
-    if isinstance(lpar, str):
-        lpar = Suppress(lpar)
-    if isinstance(rpar, str):
-        rpar = Suppress(rpar)
-
-    # if lpar and rpar are not suppressed, wrap in group
-    if not (isinstance(lpar, Suppress) and isinstance(rpar, Suppress)):
-        lastExpr = base_expr | Group(lpar + ret + rpar)
-    else:
-        lastExpr = base_expr | (lpar + ret + rpar)
-
-    for i, operDef in enumerate(op_list):
-        opExpr, arity, rightLeftAssoc, pa = (operDef + (None,))[:4]
-        if isinstance(opExpr, str_type):
-            opExpr = ParserElement._literalStringClass(opExpr)
-        if arity == 3:
-            if not isinstance(opExpr, (tuple, list)) or len(opExpr) != 2:
-                raise ValueError(
-                    "if numterms=3, opExpr must be a tuple or list of two expressions"
-                )
-            opExpr1, opExpr2 = opExpr
-            term_name = "{}{} term".format(opExpr1, opExpr2)
-        else:
-            term_name = "{} term".format(opExpr)
-
-        if not 1 <= arity <= 3:
-            raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
-
-        if rightLeftAssoc not in (OpAssoc.LEFT, OpAssoc.RIGHT):
-            raise ValueError("operator must indicate right or left associativity")
-
-        thisExpr = Forward().set_name(term_name)
-        if rightLeftAssoc is OpAssoc.LEFT:
-            if arity == 1:
-                matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + opExpr[1, ...])
-            elif arity == 2:
-                if opExpr is not None:
-                    matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group(
-                        lastExpr + (opExpr + lastExpr)[1, ...]
-                    )
-                else:
-                    matchExpr = _FB(lastExpr + lastExpr) + Group(lastExpr[2, ...])
-            elif arity == 3:
-                matchExpr = _FB(
-                    lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr
-                ) + Group(lastExpr + OneOrMore(opExpr1 + lastExpr + opExpr2 + lastExpr))
-        elif rightLeftAssoc is OpAssoc.RIGHT:
-            if arity == 1:
-                # try to avoid LR with this extra test
-                if not isinstance(opExpr, Opt):
-                    opExpr = Opt(opExpr)
-                matchExpr = _FB(opExpr.expr + thisExpr) + Group(opExpr + thisExpr)
-            elif arity == 2:
-                if opExpr is not None:
-                    matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group(
-                        lastExpr + (opExpr + thisExpr)[1, ...]
-                    )
-                else:
-                    matchExpr = _FB(lastExpr + thisExpr) + Group(
-                        lastExpr + thisExpr[1, ...]
-                    )
-            elif arity == 3:
-                matchExpr = _FB(
-                    lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr
-                ) + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr)
-        if pa:
-            if isinstance(pa, (tuple, list)):
-                matchExpr.set_parse_action(*pa)
-            else:
-                matchExpr.set_parse_action(pa)
-        thisExpr <<= (matchExpr | lastExpr).set_name(term_name)
-        lastExpr = thisExpr
-    ret <<= lastExpr
-    return ret
-
-
-def indentedBlock(blockStatementExpr, indentStack, indent=True, backup_stacks=[]):
-    """
-    (DEPRECATED - use IndentedBlock class instead)
-    Helper method for defining space-delimited indentation blocks,
-    such as those used to define block statements in Python source code.
-
-    Parameters:
-
-    - ``blockStatementExpr`` - expression defining syntax of statement that
-      is repeated within the indented block
-    - ``indentStack`` - list created by caller to manage indentation stack
-      (multiple ``statementWithIndentedBlock`` expressions within a single
-      grammar should share a common ``indentStack``)
-    - ``indent`` - boolean indicating whether block must be indented beyond
-      the current level; set to ``False`` for block of left-most statements
-      (default= ``True``)
-
-    A valid block must contain at least one ``blockStatement``.
-
-    (Note that indentedBlock uses internal parse actions which make it
-    incompatible with packrat parsing.)
-
-    Example::
-
-        data = '''
-        def A(z):
-          A1
-          B = 100
-          G = A2
-          A2
-          A3
-        B
-        def BB(a,b,c):
-          BB1
-          def BBA():
-            bba1
-            bba2
-            bba3
-        C
-        D
-        def spam(x,y):
-             def eggs(z):
-                 pass
-        '''
-
-
-        indentStack = [1]
-        stmt = Forward()
-
-        identifier = Word(alphas, alphanums)
-        funcDecl = ("def" + identifier + Group("(" + Opt(delimitedList(identifier)) + ")") + ":")
-        func_body = indentedBlock(stmt, indentStack)
-        funcDef = Group(funcDecl + func_body)
-
-        rvalue = Forward()
-        funcCall = Group(identifier + "(" + Opt(delimitedList(rvalue)) + ")")
-        rvalue << (funcCall | identifier | Word(nums))
-        assignment = Group(identifier + "=" + rvalue)
-        stmt << (funcDef | assignment | identifier)
-
-        module_body = OneOrMore(stmt)
-
-        parseTree = module_body.parseString(data)
-        parseTree.pprint()
-
-    prints::
-
-        [['def',
-          'A',
-          ['(', 'z', ')'],
-          ':',
-          [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
-         'B',
-         ['def',
-          'BB',
-          ['(', 'a', 'b', 'c', ')'],
-          ':',
-          [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
-         'C',
-         'D',
-         ['def',
-          'spam',
-          ['(', 'x', 'y', ')'],
-          ':',
-          [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
-    """
-    backup_stacks.append(indentStack[:])
-
-    def reset_stack():
-        indentStack[:] = backup_stacks[-1]
-
-    def checkPeerIndent(s, l, t):
-        if l >= len(s):
-            return
-        curCol = col(l, s)
-        if curCol != indentStack[-1]:
-            if curCol > indentStack[-1]:
-                raise ParseException(s, l, "illegal nesting")
-            raise ParseException(s, l, "not a peer entry")
-
-    def checkSubIndent(s, l, t):
-        curCol = col(l, s)
-        if curCol > indentStack[-1]:
-            indentStack.append(curCol)
-        else:
-            raise ParseException(s, l, "not a subentry")
-
-    def checkUnindent(s, l, t):
-        if l >= len(s):
-            return
-        curCol = col(l, s)
-        if not (indentStack and curCol in indentStack):
-            raise ParseException(s, l, "not an unindent")
-        if curCol < indentStack[-1]:
-            indentStack.pop()
-
-    NL = OneOrMore(LineEnd().set_whitespace_chars("\t ").suppress())
-    INDENT = (Empty() + Empty().set_parse_action(checkSubIndent)).set_name("INDENT")
-    PEER = Empty().set_parse_action(checkPeerIndent).set_name("")
-    UNDENT = Empty().set_parse_action(checkUnindent).set_name("UNINDENT")
-    if indent:
-        smExpr = Group(
-            Opt(NL)
-            + INDENT
-            + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL))
-            + UNDENT
-        )
-    else:
-        smExpr = Group(
-            Opt(NL)
-            + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL))
-            + Opt(UNDENT)
-        )
-
-    # add a parse action to remove backup_stack from list of backups
-    smExpr.add_parse_action(
-        lambda: backup_stacks.pop(-1) and None if backup_stacks else None
-    )
-    smExpr.set_fail_action(lambda a, b, c, d: reset_stack())
-    blockStatementExpr.ignore(_bslash + LineEnd())
-    return smExpr.set_name("indented block")
-
-
-# it's easy to get these comment structures wrong - they're very common, so may as well make them available
-c_style_comment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/").set_name(
-    "C style comment"
-)
-"Comment of the form ``/* ... */``"
-
-html_comment = Regex(r"<!--[\s\S]*?-->").set_name("HTML comment")
-"Comment of the form ``<!-- ... -->``"
-
-rest_of_line = Regex(r".*").leave_whitespace().set_name("rest of line")
-dbl_slash_comment = Regex(r"//(?:\\\n|[^\n])*").set_name("// comment")
-"Comment of the form ``// ... (to end of line)``"
-
-cpp_style_comment = Combine(
-    Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/" | dbl_slash_comment
-).set_name("C++ style comment")
-"Comment of either form :class:`c_style_comment` or :class:`dbl_slash_comment`"
-
-java_style_comment = cpp_style_comment
-"Same as :class:`cpp_style_comment`"
-
-python_style_comment = Regex(r"#.*").set_name("Python style comment")
-"Comment of the form ``# ... (to end of line)``"
-
-
-# build list of built-in expressions, for future reference if a global default value
-# gets updated
-_builtin_exprs = [v for v in vars().values() if isinstance(v, ParserElement)]
-
-
-# pre-PEP8 compatible names
-delimitedList = delimited_list
-countedArray = counted_array
-matchPreviousLiteral = match_previous_literal
-matchPreviousExpr = match_previous_expr
-oneOf = one_of
-dictOf = dict_of
-originalTextFor = original_text_for
-nestedExpr = nested_expr
-makeHTMLTags = make_html_tags
-makeXMLTags = make_xml_tags
-anyOpenTag, anyCloseTag = any_open_tag, any_close_tag
-commonHTMLEntity = common_html_entity
-replaceHTMLEntity = replace_html_entity
-opAssoc = OpAssoc
-infixNotation = infix_notation
-cStyleComment = c_style_comment
-htmlComment = html_comment
-restOfLine = rest_of_line
-dblSlashComment = dbl_slash_comment
-cppStyleComment = cpp_style_comment
-javaStyleComment = java_style_comment
-pythonStyleComment = python_style_comment
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/results.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/results.py
deleted file mode 100644
index bb444df..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/results.py
+++ /dev/null
@@ -1,760 +0,0 @@
-# results.py
-from collections.abc import MutableMapping, Mapping, MutableSequence, Iterator
-import pprint
-from weakref import ref as wkref
-from typing import Tuple, Any
-
-str_type: Tuple[type, ...] = (str, bytes)
-_generator_type = type((_ for _ in ()))
-
-
-class _ParseResultsWithOffset:
-    __slots__ = ["tup"]
-
-    def __init__(self, p1, p2):
-        self.tup = (p1, p2)
-
-    def __getitem__(self, i):
-        return self.tup[i]
-
-    def __getstate__(self):
-        return self.tup
-
-    def __setstate__(self, *args):
-        self.tup = args[0]
-
-
-class ParseResults:
-    """Structured parse results, to provide multiple means of access to
-    the parsed data:
-
-    - as a list (``len(results)``)
-    - by list index (``results[0], results[1]``, etc.)
-    - by attribute (``results.<results_name>`` - see :class:`ParserElement.set_results_name`)
-
-    Example::
-
-        integer = Word(nums)
-        date_str = (integer.set_results_name("year") + '/'
-                    + integer.set_results_name("month") + '/'
-                    + integer.set_results_name("day"))
-        # equivalent form:
-        # date_str = (integer("year") + '/'
-        #             + integer("month") + '/'
-        #             + integer("day"))
-
-        # parse_string returns a ParseResults object
-        result = date_str.parse_string("1999/12/31")
-
-        def test(s, fn=repr):
-            print("{} -> {}".format(s, fn(eval(s))))
-        test("list(result)")
-        test("result[0]")
-        test("result['month']")
-        test("result.day")
-        test("'month' in result")
-        test("'minutes' in result")
-        test("result.dump()", str)
-
-    prints::
-
-        list(result) -> ['1999', '/', '12', '/', '31']
-        result[0] -> '1999'
-        result['month'] -> '12'
-        result.day -> '31'
-        'month' in result -> True
-        'minutes' in result -> False
-        result.dump() -> ['1999', '/', '12', '/', '31']
-        - day: '31'
-        - month: '12'
-        - year: '1999'
-    """
-
-    _null_values: Tuple[Any, ...] = (None, [], "", ())
-
-    __slots__ = [
-        "_name",
-        "_parent",
-        "_all_names",
-        "_modal",
-        "_toklist",
-        "_tokdict",
-        "__weakref__",
-    ]
-
-    class List(list):
-        """
-        Simple wrapper class to distinguish parsed list results that should be preserved
-        as actual Python lists, instead of being converted to :class:`ParseResults`:
-
-            LBRACK, RBRACK = map(pp.Suppress, "[]")
-            element = pp.Forward()
-            item = ppc.integer
-            element_list = LBRACK + pp.delimited_list(element) + RBRACK
-
-            # add parse actions to convert from ParseResults to actual Python collection types
-            def as_python_list(t):
-                return pp.ParseResults.List(t.as_list())
-            element_list.add_parse_action(as_python_list)
-
-            element <<= item | element_list
-
-            element.run_tests('''
-                100
-                [2,3,4]
-                [[2, 1],3,4]
-                [(2, 1),3,4]
-                (2,3,4)
-                ''', post_parse=lambda s, r: (r[0], type(r[0])))
-
-        prints::
-
-            100
-            (100, <class 'int'>)
-
-            [2,3,4]
-            ([2, 3, 4], <class 'list'>)
-
-            [[2, 1],3,4]
-            ([[2, 1], 3, 4], <class 'list'>)
-
-        (Used internally by :class:`Group` when ``aslist=True``.)
-        """
-
-        def __new__(cls, contained=None):
-            if contained is None:
-                contained = []
-
-            if not isinstance(contained, list):
-                raise TypeError(
-                    "{} may only be constructed with a list,"
-                    " not {}".format(cls.__name__, type(contained).__name__)
-                )
-
-            return list.__new__(cls)
-
-    def __new__(cls, toklist=None, name=None, **kwargs):
-        if isinstance(toklist, ParseResults):
-            return toklist
-        self = object.__new__(cls)
-        self._name = None
-        self._parent = None
-        self._all_names = set()
-
-        if toklist is None:
-            self._toklist = []
-        elif isinstance(toklist, (list, _generator_type)):
-            self._toklist = (
-                [toklist[:]]
-                if isinstance(toklist, ParseResults.List)
-                else list(toklist)
-            )
-        else:
-            self._toklist = [toklist]
-        self._tokdict = dict()
-        return self
-
-    # Performance tuning: we construct a *lot* of these, so keep this
-    # constructor as small and fast as possible
-    def __init__(
-        self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance
-    ):
-        self._modal = modal
-        if name is not None and name != "":
-            if isinstance(name, int):
-                name = str(name)
-            if not modal:
-                self._all_names = {name}
-            self._name = name
-            if toklist not in self._null_values:
-                if isinstance(toklist, (str_type, type)):
-                    toklist = [toklist]
-                if asList:
-                    if isinstance(toklist, ParseResults):
-                        self[name] = _ParseResultsWithOffset(
-                            ParseResults(toklist._toklist), 0
-                        )
-                    else:
-                        self[name] = _ParseResultsWithOffset(
-                            ParseResults(toklist[0]), 0
-                        )
-                    self[name]._name = name
-                else:
-                    try:
-                        self[name] = toklist[0]
-                    except (KeyError, TypeError, IndexError):
-                        if toklist is not self:
-                            self[name] = toklist
-                        else:
-                            self._name = name
-
-    def __getitem__(self, i):
-        if isinstance(i, (int, slice)):
-            return self._toklist[i]
-        else:
-            if i not in self._all_names:
-                return self._tokdict[i][-1][0]
-            else:
-                return ParseResults([v[0] for v in self._tokdict[i]])
-
-    def __setitem__(self, k, v, isinstance=isinstance):
-        if isinstance(v, _ParseResultsWithOffset):
-            self._tokdict[k] = self._tokdict.get(k, list()) + [v]
-            sub = v[0]
-        elif isinstance(k, (int, slice)):
-            self._toklist[k] = v
-            sub = v
-        else:
-            self._tokdict[k] = self._tokdict.get(k, list()) + [
-                _ParseResultsWithOffset(v, 0)
-            ]
-            sub = v
-        if isinstance(sub, ParseResults):
-            sub._parent = wkref(self)
-
-    def __delitem__(self, i):
-        if isinstance(i, (int, slice)):
-            mylen = len(self._toklist)
-            del self._toklist[i]
-
-            # convert int to slice
-            if isinstance(i, int):
-                if i < 0:
-                    i += mylen
-                i = slice(i, i + 1)
-            # get removed indices
-            removed = list(range(*i.indices(mylen)))
-            removed.reverse()
-            # fixup indices in token dictionary
-            for name, occurrences in self._tokdict.items():
-                for j in removed:
-                    for k, (value, position) in enumerate(occurrences):
-                        occurrences[k] = _ParseResultsWithOffset(
-                            value, position - (position > j)
-                        )
-        else:
-            del self._tokdict[i]
-
-    def __contains__(self, k) -> bool:
-        return k in self._tokdict
-
-    def __len__(self) -> int:
-        return len(self._toklist)
-
-    def __bool__(self) -> bool:
-        return not not (self._toklist or self._tokdict)
-
-    def __iter__(self) -> Iterator:
-        return iter(self._toklist)
-
-    def __reversed__(self) -> Iterator:
-        return iter(self._toklist[::-1])
-
-    def keys(self):
-        return iter(self._tokdict)
-
-    def values(self):
-        return (self[k] for k in self.keys())
-
-    def items(self):
-        return ((k, self[k]) for k in self.keys())
-
-    def haskeys(self) -> bool:
-        """
-        Since ``keys()`` returns an iterator, this method is helpful in bypassing
-        code that looks for the existence of any defined results names."""
-        return bool(self._tokdict)
-
-    def pop(self, *args, **kwargs):
-        """
-        Removes and returns item at specified index (default= ``last``).
-        Supports both ``list`` and ``dict`` semantics for ``pop()``. If
-        passed no argument or an integer argument, it will use ``list``
-        semantics and pop tokens from the list of parsed tokens. If passed
-        a non-integer argument (most likely a string), it will use ``dict``
-        semantics and pop the corresponding value from any defined results
-        names. A second default return value argument is supported, just as in
-        ``dict.pop()``.
-
-        Example::
-
-            numlist = Word(nums)[...]
-            print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321']
-
-            def remove_first(tokens):
-                tokens.pop(0)
-            numlist.add_parse_action(remove_first)
-            print(numlist.parse_string("0 123 321")) # -> ['123', '321']
-
-            label = Word(alphas)
-            patt = label("LABEL") + OneOrMore(Word(nums))
-            print(patt.parse_string("AAB 123 321").dump())
-
-            # Use pop() in a parse action to remove named result (note that corresponding value is not
-            # removed from list form of results)
-            def remove_LABEL(tokens):
-                tokens.pop("LABEL")
-                return tokens
-            patt.add_parse_action(remove_LABEL)
-            print(patt.parse_string("AAB 123 321").dump())
-
-        prints::
-
-            ['AAB', '123', '321']
-            - LABEL: 'AAB'
-
-            ['AAB', '123', '321']
-        """
-        if not args:
-            args = [-1]
-        for k, v in kwargs.items():
-            if k == "default":
-                args = (args[0], v)
-            else:
-                raise TypeError(
-                    "pop() got an unexpected keyword argument {!r}".format(k)
-                )
-        if isinstance(args[0], int) or len(args) == 1 or args[0] in self:
-            index = args[0]
-            ret = self[index]
-            del self[index]
-            return ret
-        else:
-            defaultvalue = args[1]
-            return defaultvalue
-
-    def get(self, key, default_value=None):
-        """
-        Returns named result matching the given key, or if there is no
-        such name, then returns the given ``default_value`` or ``None`` if no
-        ``default_value`` is specified.
-
-        Similar to ``dict.get()``.
-
-        Example::
-
-            integer = Word(nums)
-            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
-            result = date_str.parse_string("1999/12/31")
-            print(result.get("year")) # -> '1999'
-            print(result.get("hour", "not specified")) # -> 'not specified'
-            print(result.get("hour")) # -> None
-        """
-        if key in self:
-            return self[key]
-        else:
-            return default_value
-
-    def insert(self, index, ins_string):
-        """
-        Inserts new element at location index in the list of parsed tokens.
-
-        Similar to ``list.insert()``.
-
-        Example::
-
-            numlist = Word(nums)[...]
-            print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321']
-
-            # use a parse action to insert the parse location in the front of the parsed results
-            def insert_locn(locn, tokens):
-                tokens.insert(0, locn)
-            numlist.add_parse_action(insert_locn)
-            print(numlist.parse_string("0 123 321")) # -> [0, '0', '123', '321']
-        """
-        self._toklist.insert(index, ins_string)
-        # fixup indices in token dictionary
-        for name, occurrences in self._tokdict.items():
-            for k, (value, position) in enumerate(occurrences):
-                occurrences[k] = _ParseResultsWithOffset(
-                    value, position + (position > index)
-                )
-
-    def append(self, item):
-        """
-        Add single element to end of ``ParseResults`` list of elements.
-
-        Example::
-
-            numlist = Word(nums)[...]
-            print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321']
-
-            # use a parse action to compute the sum of the parsed integers, and add it to the end
-            def append_sum(tokens):
-                tokens.append(sum(map(int, tokens)))
-            numlist.add_parse_action(append_sum)
-            print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321', 444]
-        """
-        self._toklist.append(item)
-
-    def extend(self, itemseq):
-        """
-        Add sequence of elements to end of ``ParseResults`` list of elements.
-
-        Example::
-
-            patt = OneOrMore(Word(alphas))
-
-            # use a parse action to append the reverse of the matched strings, to make a palindrome
-            def make_palindrome(tokens):
-                tokens.extend(reversed([t[::-1] for t in tokens]))
-                return ''.join(tokens)
-            patt.add_parse_action(make_palindrome)
-            print(patt.parse_string("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
-        """
-        if isinstance(itemseq, ParseResults):
-            self.__iadd__(itemseq)
-        else:
-            self._toklist.extend(itemseq)
-
-    def clear(self):
-        """
-        Clear all elements and results names.
-        """
-        del self._toklist[:]
-        self._tokdict.clear()
-
-    def __getattr__(self, name):
-        try:
-            return self[name]
-        except KeyError:
-            if name.startswith("__"):
-                raise AttributeError(name)
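-            # undefined results names return "" rather than raising, so that
-            # optional names can be referenced safely in parse actions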
-            return ""
-
-    def __add__(self, other) -> "ParseResults":
-        ret = self.copy()
-        ret += other
-        return ret
-
-    def __iadd__(self, other) -> "ParseResults":
-        if other._tokdict:
-            offset = len(self._toklist)
-            addoffset = lambda a: offset if a < 0 else a + offset
-            otheritems = other._tokdict.items()
-            otherdictitems = [
-                (k, _ParseResultsWithOffset(v[0], addoffset(v[1])))
-                for k, vlist in otheritems
-                for v in vlist
-            ]
-            for k, v in otherdictitems:
-                self[k] = v
-                if isinstance(v[0], ParseResults):
-                    v[0]._parent = wkref(self)
-
-        self._toklist += other._toklist
-        self._all_names |= other._all_names
-        return self
-
-    def __radd__(self, other) -> "ParseResults":
-        if isinstance(other, int) and other == 0:
-            # useful for merging many ParseResults using sum() builtin
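-            #   e.g.: merged = sum([res_a, res_b, res_c])  # sum() starts from 0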
-            return self.copy()
-        else:
-            # this may raise a TypeError - so be it
-            return other + self
-
-    def __repr__(self) -> str:
-        return "{}({!r}, {})".format(type(self).__name__, self._toklist, self.as_dict())
-
-    def __str__(self) -> str:
-        return (
-            "["
-            + ", ".join(
-                [
-                    str(i) if isinstance(i, ParseResults) else repr(i)
-                    for i in self._toklist
-                ]
-            )
-            + "]"
-        )
-
-    def _asStringList(self, sep=""):
-        out = []
-        for item in self._toklist:
-            if out and sep:
-                out.append(sep)
-            if isinstance(item, ParseResults):
-                out += item._asStringList()
-            else:
-                out.append(str(item))
-        return out
-
-    def as_list(self) -> list:
-        """
-        Returns the parse results as a nested list of matching tokens, all converted to strings.
-
-        Example::
-
-            patt = OneOrMore(Word(alphas))
-            result = patt.parse_string("sldkj lsdkj sldkj")
-            # even though the result prints in string-like form, it is actually a pyparsing ParseResults
-            print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
-
-            # Use as_list() to create an actual list
-            result_list = result.as_list()
-            print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
-        """
-        return [
-            res.as_list() if isinstance(res, ParseResults) else res
-            for res in self._toklist
-        ]
-
-    def as_dict(self) -> dict:
-        """
-        Returns the named parse results as a nested dictionary.
-
-        Example::
-
-            integer = Word(nums)
-            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
-            result = date_str.parse_string('12/31/1999')
-            print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
-
-            result_dict = result.as_dict()
-            print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
-
-            # even though a ParseResults supports dict-like access, sometimes you just need to have a dict
-            import json
-            print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
-            print(json.dumps(result.as_dict())) # -> {"month": "31", "day": "1999", "year": "12"}
-        """
-
-        def to_item(obj):
-            if isinstance(obj, ParseResults):
-                return obj.as_dict() if obj.haskeys() else [to_item(v) for v in obj]
-            else:
-                return obj
-
-        return dict((k, to_item(v)) for k, v in self.items())
-
-    def copy(self) -> "ParseResults":
-        """
-        Returns a new copy of a :class:`ParseResults` object.
-        """
-        ret = ParseResults(self._toklist)
-        ret._tokdict = self._tokdict.copy()
-        ret._parent = self._parent
-        ret._all_names |= self._all_names
-        ret._name = self._name
-        return ret
-
-    def get_name(self):
-        r"""
-        Returns the results name for this token expression. Useful when several
-        different expressions might match at a particular location.
-
-        Example::
-
-            integer = Word(nums)
-            ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
-            house_number_expr = Suppress('#') + Word(nums, alphanums)
-            user_data = (Group(house_number_expr)("house_number")
-                        | Group(ssn_expr)("ssn")
-                        | Group(integer)("age"))
-            user_info = OneOrMore(user_data)
-
-            result = user_info.parse_string("22 111-22-3333 #221B")
-            for item in result:
-                print(item.get_name(), ':', item[0])
-
-        prints::
-
-            age : 22
-            ssn : 111-22-3333
-            house_number : 221B
-        """
-        if self._name:
-            return self._name
-        elif self._parent:
-            par = self._parent()
-
-            def find_in_parent(sub):
-                return next(
-                    (
-                        k
-                        for k, vlist in par._tokdict.items()
-                        for v, loc in vlist
-                        if sub is v
-                    ),
-                    None,
-                )
-
-            return find_in_parent(self) if par else None
-        elif (
-            len(self) == 1
-            and len(self._tokdict) == 1
-            and next(iter(self._tokdict.values()))[0][1] in (0, -1)
-        ):
-            return next(iter(self._tokdict.keys()))
-        else:
-            return None
-
-    def dump(self, indent="", full=True, include_list=True, _depth=0) -> str:
-        """
-        Diagnostic method for listing out the contents of
-        a :class:`ParseResults`. Accepts an optional ``indent`` argument so
-        that this string can be embedded in a nested display of other data.
-
-        Example::
-
-            integer = Word(nums)
-            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
-            result = date_str.parse_string('1999/12/31')
-            print(result.dump())
-
-        prints::
-
-            ['1999', '/', '12', '/', '31']
-            - day: '31'
-            - month: '12'
-            - year: '1999'
-        """
-        out = []
-        NL = "\n"
-        out.append(indent + str(self.as_list()) if include_list else "")
-
-        if full:
-            if self.haskeys():
-                items = sorted((str(k), v) for k, v in self.items())
-                for k, v in items:
-                    if out:
-                        out.append(NL)
-                    out.append("{}{}- {}: ".format(indent, ("  " * _depth), k))
-                    if isinstance(v, ParseResults):
-                        if v:
-                            out.append(
-                                v.dump(
-                                    indent=indent,
-                                    full=full,
-                                    include_list=include_list,
-                                    _depth=_depth + 1,
-                                )
-                            )
-                        else:
-                            out.append(str(v))
-                    else:
-                        out.append(repr(v))
-            if any(isinstance(vv, ParseResults) for vv in self):
-                v = self
-                for i, vv in enumerate(v):
-                    if isinstance(vv, ParseResults):
-                        out.append(
-                            "\n{}{}[{}]:\n{}{}{}".format(
-                                indent,
-                                ("  " * (_depth)),
-                                i,
-                                indent,
-                                ("  " * (_depth + 1)),
-                                vv.dump(
-                                    indent=indent,
-                                    full=full,
-                                    include_list=include_list,
-                                    _depth=_depth + 1,
-                                ),
-                            )
-                        )
-                    else:
-                        out.append(
-                            "\n%s%s[%d]:\n%s%s%s"
-                            % (
-                                indent,
-                                ("  " * (_depth)),
-                                i,
-                                indent,
-                                ("  " * (_depth + 1)),
-                                str(vv),
-                            )
-                        )
-
-        return "".join(out)
-
-    def pprint(self, *args, **kwargs):
-        """
-        Pretty-printer for parsed results as a list, using the
-        `pprint <https://docs.python.org/3/library/pprint.html>`_ module.
-        Accepts additional positional or keyword args as defined for
-        `pprint.pprint <https://docs.python.org/3/library/pprint.html#pprint.pprint>`_ .
-
-        Example::
-
-            ident = Word(alphas, alphanums)
-            num = Word(nums)
-            func = Forward()
-            term = ident | num | Group('(' + func + ')')
-            func <<= ident + Group(Optional(delimited_list(term)))
-            result = func.parse_string("fna a,b,(fnb c,d,200),100")
-            result.pprint(width=40)
-
-        prints::
-
-            ['fna',
-             ['a',
-              'b',
-              ['(', 'fnb', ['c', 'd', '200'], ')'],
-              '100']]
-        """
-        pprint.pprint(self.as_list(), *args, **kwargs)
-
-    # add support for pickle protocol
-    def __getstate__(self):
-        return (
-            self._toklist,
-            (
-                self._tokdict.copy(),
-                self._parent is not None and self._parent() or None,
-                self._all_names,
-                self._name,
-            ),
-        )
-
-    def __setstate__(self, state):
-        self._toklist, (self._tokdict, par, inAccumNames, self._name) = state
-        self._all_names = set(inAccumNames)
-        if par is not None:
-            self._parent = wkref(par)
-        else:
-            self._parent = None
-
-    def __getnewargs__(self):
-        return self._toklist, self._name
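-
-    # Pickle round-trip, sketched for illustration (assumes a named expression
-    # such as Word(nums)("num") from the core module):
-    #
-    #     import pickle
-    #     result = Word(nums)("num").parse_string("42")
-    #     restored = pickle.loads(pickle.dumps(result))
-    #     assert restored.as_list() == result.as_list()
-    #     assert restored["num"] == "42"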
-
-    def __dir__(self):
-        return dir(type(self)) + list(self.keys())
-
-    @classmethod
-    def from_dict(cls, other, name=None) -> "ParseResults":
-        """
-        Helper classmethod to construct a ``ParseResults`` from a ``dict``, preserving the
-        name-value relations as results names. If an optional ``name`` argument is
-        given, a nested ``ParseResults`` will be returned.
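-
-        Example (an illustrative sketch)::
-
-            rec = ParseResults.from_dict({"name": "Ada", "year": "1815"})
-            print(rec.name, rec.year)  # -> Ada 1815
-            print(rec.as_dict())       # -> {'name': 'Ada', 'year': '1815'}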
-        """
-
-        def is_iterable(obj):
-            try:
-                iter(obj)
-            except Exception:
-                return False
-            else:
-                return not isinstance(obj, str_type)
-
-        ret = cls([])
-        for k, v in other.items():
-            if isinstance(v, Mapping):
-                ret += cls.from_dict(v, name=k)
-            else:
-                ret += cls([v], name=k, asList=is_iterable(v))
-        if name is not None:
-            ret = cls([ret], name=name)
-        return ret
-
-    asList = as_list
-    asDict = as_dict
-    getName = get_name
-
-
-MutableMapping.register(ParseResults)
-MutableSequence.register(ParseResults)
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/testing.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/testing.py
deleted file mode 100644
index 991972f..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/testing.py
+++ /dev/null
@@ -1,331 +0,0 @@
-# testing.py
-
-from contextlib import contextmanager
-from typing import Optional
-
-from .core import (
-    ParserElement,
-    ParseException,
-    Keyword,
-    __diag__,
-    __compat__,
-)
-
-
-class pyparsing_test:
-    """
-    namespace class for classes useful in writing unit tests
-    """
-
-    class reset_pyparsing_context:
-        """
-        Context manager to be used when writing unit tests that modify pyparsing config values:
-        - packrat parsing
-        - bounded recursion parsing
-        - default whitespace characters
-        - default keyword characters
-        - literal string auto-conversion class
-        - __diag__ settings
-
-        Example::
-
-            with reset_pyparsing_context():
-                # test that literals used to construct a grammar are automatically suppressed
-                ParserElement.inlineLiteralsUsing(Suppress)
-
-                term = Word(alphas) | Word(nums)
-                group = Group('(' + term[...] + ')')
-
-                # assert that the '()' characters are not included in the parsed tokens
-                self.assertParseAndCheckList(group, "(abc 123 def)", ['abc', '123', 'def'])
-
-            # after exiting context manager, literals are converted to Literal expressions again
-        """
-
-        def __init__(self):
-            self._save_context = {}
-
-        def save(self):
-            self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS
-            self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS
-
-            self._save_context[
-                "literal_string_class"
-            ] = ParserElement._literalStringClass
-
-            self._save_context["verbose_stacktrace"] = ParserElement.verbose_stacktrace
-
-            self._save_context["packrat_enabled"] = ParserElement._packratEnabled
-            if ParserElement._packratEnabled:
-                self._save_context[
-                    "packrat_cache_size"
-                ] = ParserElement.packrat_cache.size
-            else:
-                self._save_context["packrat_cache_size"] = None
-            self._save_context["packrat_parse"] = ParserElement._parse
-            self._save_context[
-                "recursion_enabled"
-            ] = ParserElement._left_recursion_enabled
-
-            self._save_context["__diag__"] = {
-                name: getattr(__diag__, name) for name in __diag__._all_names
-            }
-
-            self._save_context["__compat__"] = {
-                "collect_all_And_tokens": __compat__.collect_all_And_tokens
-            }
-
-            return self
-
-        def restore(self):
-            # reset pyparsing global state
-            if (
-                ParserElement.DEFAULT_WHITE_CHARS
-                != self._save_context["default_whitespace"]
-            ):
-                ParserElement.set_default_whitespace_chars(
-                    self._save_context["default_whitespace"]
-                )
-
-            ParserElement.verbose_stacktrace = self._save_context["verbose_stacktrace"]
-
-            Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"]
-            ParserElement.inlineLiteralsUsing(
-                self._save_context["literal_string_class"]
-            )
-
-            for name, value in self._save_context["__diag__"].items():
-                (__diag__.enable if value else __diag__.disable)(name)
-
-            ParserElement._packratEnabled = False
-            if self._save_context["packrat_enabled"]:
-                ParserElement.enable_packrat(self._save_context["packrat_cache_size"])
-            else:
-                ParserElement._parse = self._save_context["packrat_parse"]
-            ParserElement._left_recursion_enabled = self._save_context[
-                "recursion_enabled"
-            ]
-
-            __compat__.collect_all_And_tokens = self._save_context["__compat__"][
-                "collect_all_And_tokens"
-            ]
-
-            return self
-
-        def copy(self):
-            ret = type(self)()
-            ret._save_context.update(self._save_context)
-            return ret
-
-        def __enter__(self):
-            return self.save()
-
-        def __exit__(self, *args):
-            self.restore()
-
-    class TestParseResultsAsserts:
-        """
-        A mixin class to add parse results assertion methods to normal unittest.TestCase classes.
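-
-        Example (an illustrative sketch; assumes ``unittest`` and the usual
-        pyparsing names are imported)::
-
-            class MyParserTest(pyparsing_test.TestParseResultsAsserts, unittest.TestCase):
-                def test_date(self):
-                    integer = Word(nums)
-                    date_str = integer("year") + "/" + integer("month") + "/" + integer("day")
-                    self.assertParseAndCheckList(
-                        date_str, "1999/12/31", ["1999", "/", "12", "/", "31"]
-                    )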
-        """
-
-        def assertParseResultsEquals(
-            self, result, expected_list=None, expected_dict=None, msg=None
-        ):
-            """
-            Unit test assertion to compare a :class:`ParseResults` object with an optional ``expected_list``,
-            and compare any defined results names with an optional ``expected_dict``.
-            """
-            if expected_list is not None:
-                self.assertEqual(expected_list, result.as_list(), msg=msg)
-            if expected_dict is not None:
-                self.assertEqual(expected_dict, result.as_dict(), msg=msg)
-
-        def assertParseAndCheckList(
-            self, expr, test_string, expected_list, msg=None, verbose=True
-        ):
-            """
-            Convenience wrapper assert to test a parser element and input string, and assert that
-            the resulting ``ParseResults.asList()`` is equal to the ``expected_list``.
-            """
-            result = expr.parse_string(test_string, parse_all=True)
-            if verbose:
-                print(result.dump())
-            else:
-                print(result.as_list())
-            self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg)
-
-        def assertParseAndCheckDict(
-            self, expr, test_string, expected_dict, msg=None, verbose=True
-        ):
-            """
-            Convenience wrapper assert to test a parser element and input string, and assert that
-            the resulting ``ParseResults.asDict()`` is equal to the ``expected_dict``.
-            """
-            result = expr.parse_string(test_string, parse_all=True)
-            if verbose:
-                print(result.dump())
-            else:
-                print(result.as_list())
-            self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg)
-
-        def assertRunTestResults(
-            self, run_tests_report, expected_parse_results=None, msg=None
-        ):
-            """
-            Unit test assertion to evaluate output of ``ParserElement.runTests()``. If a list of
-            list-dict tuples is given as the ``expected_parse_results`` argument, then these are zipped
-            with the report tuples returned by ``runTests`` and evaluated using ``assertParseResultsEquals``.
-            Finally, asserts that the overall ``runTests()`` success value is ``True``.
-
-            :param run_tests_report: tuple(bool, [tuple(str, ParseResults or Exception)]) returned from runTests
-            :param expected_parse_results: (optional) [tuple(str, list, dict, Exception)]
-            """
-            run_test_success, run_test_results = run_tests_report
-
-            if expected_parse_results is not None:
-                merged = [
-                    (*rpt, expected)
-                    for rpt, expected in zip(run_test_results, expected_parse_results)
-                ]
-                for test_string, result, expected in merged:
-                    # expected should be a tuple containing a list and/or a dict or an exception,
-                    # and optional failure message string
-                    # an empty tuple will skip any result validation
-                    fail_msg = next(
-                        (exp for exp in expected if isinstance(exp, str)), None
-                    )
-                    expected_exception = next(
-                        (
-                            exp
-                            for exp in expected
-                            if isinstance(exp, type) and issubclass(exp, Exception)
-                        ),
-                        None,
-                    )
-                    if expected_exception is not None:
-                        with self.assertRaises(
-                            expected_exception=expected_exception, msg=fail_msg or msg
-                        ):
-                            if isinstance(result, Exception):
-                                raise result
-                    else:
-                        expected_list = next(
-                            (exp for exp in expected if isinstance(exp, list)), None
-                        )
-                        expected_dict = next(
-                            (exp for exp in expected if isinstance(exp, dict)), None
-                        )
-                        if (expected_list, expected_dict) != (None, None):
-                            self.assertParseResultsEquals(
-                                result,
-                                expected_list=expected_list,
-                                expected_dict=expected_dict,
-                                msg=fail_msg or msg,
-                            )
-                        else:
-                            # warning here maybe?
-                            print("no validation for {!r}".format(test_string))
-
-            # do this last, in case some specific test results can be reported instead
-            self.assertTrue(
-                run_test_success, msg=msg if msg is not None else "failed runTests"
-            )
-
-        @contextmanager
-        def assertRaisesParseException(self, exc_type=ParseException, msg=None):
-            with self.assertRaises(exc_type, msg=msg):
-                yield
-
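
A short sketch of how the mixin is meant to be used (module and class paths
taken from upstream pyparsing 3.x, where this code originates)::

    import unittest
    import pyparsing as pp
    from pyparsing.testing import pyparsing_test as ppt

    class NumberListTests(ppt.TestParseResultsAsserts, unittest.TestCase):
        def test_integers(self):
            expr = pp.OneOrMore(pp.Word(pp.nums))
            # parses with parse_all=True and compares as_list() output
            self.assertParseAndCheckList(expr, "1 2 3", ["1", "2", "3"])
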
-    @staticmethod
-    def with_line_numbers(
-        s: str,
-        start_line: Optional[int] = None,
-        end_line: Optional[int] = None,
-        expand_tabs: bool = True,
-        eol_mark: str = "|",
-        mark_spaces: Optional[str] = None,
-        mark_control: Optional[str] = None,
-    ) -> str:
-        """
-        Helpful method for debugging a parser - returns the input string annotated with line and column numbers.
-        (Line and column numbers are 1-based.)
-
-        :param s: str - string to be printed with line and column numbers
-        :param start_line: int - (optional) starting line number in s to print (default=1)
-        :param end_line: int - (optional) ending line number in s to print (default=len(s))
-        :param expand_tabs: bool - (optional) expand tabs to spaces, to match the pyparsing default
-        :param eol_mark: str - (optional) string to mark the end of lines, helps visualize trailing spaces (default="|")
-        :param mark_spaces: str - (optional) special character to display in place of spaces
-        :param mark_control: str - (optional) convert non-printing control characters to a placeholder
-                                 character; valid values:
-                                 - "unicode" - replaces control chars with Unicode symbols, such as "␍" and "␊"
-                                 - any single character string - replace control characters with given string
-                                 - None (default) - string is displayed as-is
-
-        :return: str - input string with leading line numbers and column number headers
-        """
-        if expand_tabs:
-            s = s.expandtabs()
-        if mark_control is not None:
-            if mark_control == "unicode":
-                tbl = str.maketrans(
-                    {c: u for c, u in zip(range(0, 33), range(0x2400, 0x2433))}
-                    | {127: 0x2421}
-                )
-                eol_mark = ""
-            else:
-                tbl = str.maketrans(
-                    {c: mark_control for c in list(range(0, 32)) + [127]}
-                )
-            s = s.translate(tbl)
-        if mark_spaces is not None and mark_spaces != " ":
-            if mark_spaces == "unicode":
-                tbl = str.maketrans({9: 0x2409, 32: 0x2423})
-                s = s.translate(tbl)
-            else:
-                s = s.replace(" ", mark_spaces)
-        if start_line is None:
-            start_line = 1
-        if end_line is None:
-            end_line = len(s)
-        end_line = min(end_line, len(s))
-        start_line = min(max(1, start_line), end_line)
-
-        if mark_control != "unicode":
-            s_lines = s.splitlines()[start_line - 1 : end_line]
-        else:
-            s_lines = [line + "␊" for line in s.split("␊")[start_line - 1 : end_line]]
-        if not s_lines:
-            return ""
-
-        lineno_width = len(str(end_line))
-        max_line_len = max(len(line) for line in s_lines)
-        lead = " " * (lineno_width + 1)
-        if max_line_len >= 99:
-            header0 = (
-                lead
-                + "".join(
-                    "{}{}".format(" " * 99, (i + 1) % 100)
-                    for i in range(max(max_line_len // 100, 1))
-                )
-                + "\n"
-            )
-        else:
-            header0 = ""
-        header1 = (
-            header0
-            + lead
-            + "".join(
-                "         {}".format((i + 1) % 10)
-                for i in range(-(-max_line_len // 10))
-            )
-            + "\n"
-        )
-        header2 = lead + "1234567890" * (-(-max_line_len // 10)) + "\n"
-        return (
-            header1
-            + header2
-            + "\n".join(
-                "{:{}d}:{}{}".format(i, lineno_width, line, eol_mark)
-                for i, line in enumerate(s_lines, start=start_line)
-            )
-            + "\n"
-        )
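
A quick sketch of calling the debug helper above (accessed here through
pyparsing_test, its enclosing class in upstream pyparsing)::

    from pyparsing.testing import pyparsing_test as ppt

    text = "abc def\n\tghi  \njkl"
    print(ppt.with_line_numbers(text, eol_mark="|", mark_spaces="."))
    # output: a column-number header, then each line prefixed with its
    # 1-based line number and terminated with "|", so trailing spaces
    # and tab expansion become visible
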
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/unicode.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/unicode.py
deleted file mode 100644
index 9226148..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/unicode.py
+++ /dev/null
@@ -1,332 +0,0 @@
-# unicode.py
-
-import sys
-from itertools import filterfalse
-from typing import List, Tuple, Union
-
-
-class _lazyclassproperty:
-    def __init__(self, fn):
-        self.fn = fn
-        self.__doc__ = fn.__doc__
-        self.__name__ = fn.__name__
-
-    def __get__(self, obj, cls):
-        if cls is None:
-            cls = type(obj)
-        if not hasattr(cls, "_intern") or any(
-            cls._intern is getattr(superclass, "_intern", [])
-            for superclass in cls.__mro__[1:]
-        ):
-            cls._intern = {}
-        attrname = self.fn.__name__
-        if attrname not in cls._intern:
-            cls._intern[attrname] = self.fn(cls)
-        return cls._intern[attrname]
-
-
-UnicodeRangeList = List[Union[Tuple[int, int], Tuple[int]]]
-
-
-class unicode_set:
-    """
-    A set of Unicode characters, for language-specific strings for
-    ``alphas``, ``nums``, ``alphanums``, and ``printables``.
-    A unicode_set is defined by a list of ranges in the Unicode character
-    set, in a class attribute ``_ranges``. Ranges can be specified using
-    2-tuples or a 1-tuple, such as::
-
-        _ranges = [
-            (0x0020, 0x007e),
-            (0x00a0, 0x00ff),
-            (0x0100,),
-            ]
-
-    Ranges are left- and right-inclusive. A 1-tuple of (x,) is treated as (x, x).
-
-    A unicode set can also be defined using multiple inheritance of other unicode sets::
-
-        class CJK(Chinese, Japanese, Korean):
-            pass
-    """
-
-    _ranges: UnicodeRangeList = []
-
-    @_lazyclassproperty
-    def _chars_for_ranges(cls):
-        ret = []
-        for cc in cls.__mro__:
-            if cc is unicode_set:
-                break
-            for rr in getattr(cc, "_ranges", ()):
-                ret.extend(range(rr[0], rr[-1] + 1))
-        return [chr(c) for c in sorted(set(ret))]
-
-    @_lazyclassproperty
-    def printables(cls):
-        "all non-whitespace characters in this range"
-        return "".join(filterfalse(str.isspace, cls._chars_for_ranges))
-
-    @_lazyclassproperty
-    def alphas(cls):
-        "all alphabetic characters in this range"
-        return "".join(filter(str.isalpha, cls._chars_for_ranges))
-
-    @_lazyclassproperty
-    def nums(cls):
-        "all numeric digit characters in this range"
-        return "".join(filter(str.isdigit, cls._chars_for_ranges))
-
-    @_lazyclassproperty
-    def alphanums(cls):
-        "all alphanumeric characters in this range"
-        return cls.alphas + cls.nums
-
-    @_lazyclassproperty
-    def identchars(cls):
-        "all characters in this range that are valid identifier characters, plus underscore '_'"
-        return "".join(
-            sorted(
-                set(
-                    "".join(filter(str.isidentifier, cls._chars_for_ranges))
-                    + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµº"
-                    + "ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ"
-                    + "_"
-                )
-            )
-        )
-
-    @_lazyclassproperty
-    def identbodychars(cls):
-        """
-        all characters in this range that are valid identifier body characters,
-        plus the digits 0-9
-        """
-        return "".join(
-            sorted(
-                set(
-                    cls.identchars
-                    + "0123456789"
-                    + "".join(
-                        [c for c in cls._chars_for_ranges if ("_" + c).isidentifier()]
-                    )
-                )
-            )
-        )
-
-
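Defining a new set only requires subclassing unicode_set with a _ranges list;
a hypothetical sketch using the Unicode Braille Patterns block
(U+2800..U+28FF)::

    class Braille(unicode_set):
        "Unicode set for the Braille Patterns block"
        _ranges: UnicodeRangeList = [(0x2800, 0x28FF)]

    # the lazy class properties then derive Braille.printables,
    # Braille.alphas, Braille.nums, etc. on first access
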
-class pyparsing_unicode(unicode_set):
-    """
-    A namespace class for defining common language unicode_sets.
-    """
-
-    _ranges: UnicodeRangeList = [(32, sys.maxunicode)]
-
-    class Latin1(unicode_set):
-        "Unicode set for Latin-1 Unicode Character Range"
-        _ranges: UnicodeRangeList = [
-            (0x0020, 0x007E),
-            (0x00A0, 0x00FF),
-        ]
-
-    class LatinA(unicode_set):
-        "Unicode set for Latin-A Unicode Character Range"
-        _ranges: UnicodeRangeList = [
-            (0x0100, 0x017F),
-        ]
-
-    class LatinB(unicode_set):
-        "Unicode set for Latin-B Unicode Character Range"
-        _ranges: UnicodeRangeList = [
-            (0x0180, 0x024F),
-        ]
-
-    class Greek(unicode_set):
-        "Unicode set for Greek Unicode Character Ranges"
-        _ranges: UnicodeRangeList = [
-            (0x0342, 0x0345),
-            (0x0370, 0x0377),
-            (0x037A, 0x037F),
-            (0x0384, 0x038A),
-            (0x038C,),
-            (0x038E, 0x03A1),
-            (0x03A3, 0x03E1),
-            (0x03F0, 0x03FF),
-            (0x1D26, 0x1D2A),
-            (0x1D5E,),
-            (0x1D60,),
-            (0x1D66, 0x1D6A),
-            (0x1F00, 0x1F15),
-            (0x1F18, 0x1F1D),
-            (0x1F20, 0x1F45),
-            (0x1F48, 0x1F4D),
-            (0x1F50, 0x1F57),
-            (0x1F59,),
-            (0x1F5B,),
-            (0x1F5D,),
-            (0x1F5F, 0x1F7D),
-            (0x1F80, 0x1FB4),
-            (0x1FB6, 0x1FC4),
-            (0x1FC6, 0x1FD3),
-            (0x1FD6, 0x1FDB),
-            (0x1FDD, 0x1FEF),
-            (0x1FF2, 0x1FF4),
-            (0x1FF6, 0x1FFE),
-            (0x2129,),
-            (0x2719, 0x271A),
-            (0xAB65,),
-            (0x10140, 0x1018D),
-            (0x101A0,),
-            (0x1D200, 0x1D245),
-            (0x1F7A1, 0x1F7A7),
-        ]
-
-    class Cyrillic(unicode_set):
-        "Unicode set for Cyrillic Unicode Character Range"
-        _ranges: UnicodeRangeList = [
-            (0x0400, 0x052F),
-            (0x1C80, 0x1C88),
-            (0x1D2B,),
-            (0x1D78,),
-            (0x2DE0, 0x2DFF),
-            (0xA640, 0xA672),
-            (0xA674, 0xA69F),
-            (0xFE2E, 0xFE2F),
-        ]
-
-    class Chinese(unicode_set):
-        "Unicode set for Chinese Unicode Character Range"
-        _ranges: UnicodeRangeList = [
-            (0x2E80, 0x2E99),
-            (0x2E9B, 0x2EF3),
-            (0x31C0, 0x31E3),
-            (0x3400, 0x4DB5),
-            (0x4E00, 0x9FEF),
-            (0xA700, 0xA707),
-            (0xF900, 0xFA6D),
-            (0xFA70, 0xFAD9),
-            (0x16FE2, 0x16FE3),
-            (0x1F210, 0x1F212),
-            (0x1F214, 0x1F23B),
-            (0x1F240, 0x1F248),
-            (0x20000, 0x2A6D6),
-            (0x2A700, 0x2B734),
-            (0x2B740, 0x2B81D),
-            (0x2B820, 0x2CEA1),
-            (0x2CEB0, 0x2EBE0),
-            (0x2F800, 0x2FA1D),
-        ]
-
-    class Japanese(unicode_set):
-        "Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges"
-        _ranges: UnicodeRangeList = []
-
-        class Kanji(unicode_set):
-            "Unicode set for Kanji Unicode Character Range"
-            _ranges: UnicodeRangeList = [
-                (0x4E00, 0x9FBF),
-                (0x3000, 0x303F),
-            ]
-
-        class Hiragana(unicode_set):
-            "Unicode set for Hiragana Unicode Character Range"
-            _ranges: UnicodeRangeList = [
-                (0x3041, 0x3096),
-                (0x3099, 0x30A0),
-                (0x30FC,),
-                (0xFF70,),
-                (0x1B001,),
-                (0x1B150, 0x1B152),
-                (0x1F200,),
-            ]
-
-        class Katakana(unicode_set):
-            "Unicode set for Katakana  Unicode Character Range"
-            _ranges: UnicodeRangeList = [
-                (0x3099, 0x309C),
-                (0x30A0, 0x30FF),
-                (0x31F0, 0x31FF),
-                (0x32D0, 0x32FE),
-                (0xFF65, 0xFF9F),
-                (0x1B000,),
-                (0x1B164, 0x1B167),
-                (0x1F201, 0x1F202),
-                (0x1F213,),
-            ]
-
-    class Hangul(unicode_set):
-        "Unicode set for Hangul (Korean) Unicode Character Range"
-        _ranges: UnicodeRangeList = [
-            (0x1100, 0x11FF),
-            (0x302E, 0x302F),
-            (0x3131, 0x318E),
-            (0x3200, 0x321C),
-            (0x3260, 0x327B),
-            (0x327E,),
-            (0xA960, 0xA97C),
-            (0xAC00, 0xD7A3),
-            (0xD7B0, 0xD7C6),
-            (0xD7CB, 0xD7FB),
-            (0xFFA0, 0xFFBE),
-            (0xFFC2, 0xFFC7),
-            (0xFFCA, 0xFFCF),
-            (0xFFD2, 0xFFD7),
-            (0xFFDA, 0xFFDC),
-        ]
-
-    Korean = Hangul
-
-    class CJK(Chinese, Japanese, Hangul):
-        "Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range"
-        pass
-
-    class Thai(unicode_set):
-        "Unicode set for Thai Unicode Character Range"
-        _ranges: UnicodeRangeList = [(0x0E01, 0x0E3A), (0x0E3F, 0x0E5B)]
-
-    class Arabic(unicode_set):
-        "Unicode set for Arabic Unicode Character Range"
-        _ranges: UnicodeRangeList = [
-            (0x0600, 0x061B),
-            (0x061E, 0x06FF),
-            (0x0700, 0x077F),
-        ]
-
-    class Hebrew(unicode_set):
-        "Unicode set for Hebrew Unicode Character Range"
-        _ranges: UnicodeRangeList = [
-            (0x0591, 0x05C7),
-            (0x05D0, 0x05EA),
-            (0x05EF, 0x05F4),
-            (0xFB1D, 0xFB36),
-            (0xFB38, 0xFB3C),
-            (0xFB3E,),
-            (0xFB40, 0xFB41),
-            (0xFB43, 0xFB44),
-            (0xFB46, 0xFB4F),
-        ]
-
-    class Devanagari(unicode_set):
-        "Unicode set for Devanagari Unicode Character Range"
-        _ranges: UnicodeRangeList = [(0x0900, 0x097F), (0xA8E0, 0xA8FF)]
-
-
-pyparsing_unicode.Japanese._ranges = (
-    pyparsing_unicode.Japanese.Kanji._ranges
-    + pyparsing_unicode.Japanese.Hiragana._ranges
-    + pyparsing_unicode.Japanese.Katakana._ranges
-)
-
-# define ranges in language character sets
-pyparsing_unicode.العربية = pyparsing_unicode.Arabic
-pyparsing_unicode.中文 = pyparsing_unicode.Chinese
-pyparsing_unicode.кириллица = pyparsing_unicode.Cyrillic
-pyparsing_unicode.Ελληνικά = pyparsing_unicode.Greek
-pyparsing_unicode.עִברִית = pyparsing_unicode.Hebrew
-pyparsing_unicode.日本語 = pyparsing_unicode.Japanese
-pyparsing_unicode.Japanese.漢字 = pyparsing_unicode.Japanese.Kanji
-pyparsing_unicode.Japanese.カタカナ = pyparsing_unicode.Japanese.Katakana
-pyparsing_unicode.Japanese.ひらがな = pyparsing_unicode.Japanese.Hiragana
-pyparsing_unicode.한국어 = pyparsing_unicode.Korean
-pyparsing_unicode.ไทย = pyparsing_unicode.Thai
-pyparsing_unicode.देवनागरी = pyparsing_unicode.Devanagari
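
These sets plug directly into parser definitions; a minimal sketch using the
public pyparsing API::

    import pyparsing as pp

    greek_word = pp.Word(pp.pyparsing_unicode.Greek.alphas)
    print(greek_word.parse_string("αβγ δεζ"))  # -> ['αβγ']

    # the language-native aliases defined above work identically:
    cyrillic_word = pp.Word(pp.pyparsing_unicode.кириллица.alphas)
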
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/util.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/util.py
deleted file mode 100644
index 34ce092..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/pyparsing/util.py
+++ /dev/null
@@ -1,235 +0,0 @@
-# util.py
-import warnings
-import types
-import collections
-import itertools
-from functools import lru_cache
-from typing import List, Union, Iterable
-
-_bslash = chr(92)
-
-
-class __config_flags:
-    """Internal class for defining compatibility and debugging flags"""
-
-    _all_names: List[str] = []
-    _fixed_names: List[str] = []
-    _type_desc = "configuration"
-
-    @classmethod
-    def _set(cls, dname, value):
-        if dname in cls._fixed_names:
-            warnings.warn(
-                "{}.{} {} is {} and cannot be overridden".format(
-                    cls.__name__,
-                    dname,
-                    cls._type_desc,
-                    str(getattr(cls, dname)).upper(),
-                )
-            )
-            return
-        if dname in cls._all_names:
-            setattr(cls, dname, value)
-        else:
-            raise ValueError("no such {} {!r}".format(cls._type_desc, dname))
-
-    enable = classmethod(lambda cls, name: cls._set(name, True))
-    disable = classmethod(lambda cls, name: cls._set(name, False))
-
-
-@lru_cache(maxsize=128)
-def col(loc: int, strg: str) -> int:
-    """
-    Returns current column within a string, counting newlines as line separators.
-    The first column is number 1.
-
-    Note: the default parsing behavior is to expand tabs in the input string
-    before starting the parsing process.  See
-    :meth:`ParserElement.parse_string` for more
-    information on parsing strings containing ``<TAB>`` s, and suggested
-    methods to maintain a consistent view of the parsed string, the parse
-    location, and line and column positions within the parsed string.
-    """
-    s = strg
-    return 1 if 0 < loc < len(s) and s[loc - 1] == "\n" else loc - s.rfind("\n", 0, loc)
-
-
-@lru_cache(maxsize=128)
-def lineno(loc: int, strg: str) -> int:
-    """Returns current line number within a string, counting newlines as line separators.
-    The first line is number 1.
-
-    Note - the default parsing behavior is to expand tabs in the input string
-    before starting the parsing process.  See :meth:`ParserElement.parse_string`
-    for more information on parsing strings containing ``<TAB>`` s, and
-    suggested methods to maintain a consistent view of the parsed string, the
-    parse location, and line and column positions within the parsed string.
-    """
-    return strg.count("\n", 0, loc) + 1
-
-
-@lru_cache(maxsize=128)
-def line(loc: int, strg: str) -> str:
-    """
-    Returns the line of text containing loc within a string, counting newlines as line separators.
-    """
-    last_cr = strg.rfind("\n", 0, loc)
-    next_cr = strg.find("\n", loc)
-    return strg[last_cr + 1 : next_cr] if next_cr >= 0 else strg[last_cr + 1 :]
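
Taken together, the three cached helpers map a flat string offset to
line/column coordinates; a small worked example::

    s = "abc\ndef"
    loc = 5                       # offset of the "e"
    assert lineno(loc, s) == 2    # newlines before loc, plus one
    assert col(loc, s) == 2       # distance from the preceding "\n"
    assert line(loc, s) == "def"  # text between the surrounding newlines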
-
-
-class _UnboundedCache:
-    def __init__(self):
-        cache = {}
-        cache_get = cache.get
-        self.not_in_cache = not_in_cache = object()
-
-        def get(_, key):
-            return cache_get(key, not_in_cache)
-
-        def set_(_, key, value):
-            cache[key] = value
-
-        def clear(_):
-            cache.clear()
-
-        self.size = None
-        self.get = types.MethodType(get, self)
-        self.set = types.MethodType(set_, self)
-        self.clear = types.MethodType(clear, self)
-
-
-class _FifoCache:
-    def __init__(self, size):
-        self.not_in_cache = not_in_cache = object()
-        cache = collections.OrderedDict()
-        cache_get = cache.get
-
-        def get(_, key):
-            return cache_get(key, not_in_cache)
-
-        def set_(_, key, value):
-            cache[key] = value
-            while len(cache) > size:
-                cache.popitem(last=False)
-
-        def clear(_):
-            cache.clear()
-
-        self.size = size
-        self.get = types.MethodType(get, self)
-        self.set = types.MethodType(set_, self)
-        self.clear = types.MethodType(clear, self)
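
A behavioral sketch of the FIFO cache above (it backs pyparsing's bounded
packrat memoization; insertion order, not access order, decides eviction)::

    cache = _FifoCache(size=2)
    cache.set("a", 1)
    cache.set("b", 2)
    cache.set("c", 3)  # evicts "a", the oldest insertion
    assert cache.get("a") is cache.not_in_cache
    assert cache.get("c") == 3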
-
-
-class LRUMemo:
-    """
-    A memoizing mapping that retains `capacity` deleted items
-
-    The memo tracks retained items by their access order; once `capacity` items
-    are retained, the least recently used item is discarded.
-    """
-
-    def __init__(self, capacity):
-        self._capacity = capacity
-        self._active = {}
-        self._memory = collections.OrderedDict()
-
-    def __getitem__(self, key):
-        try:
-            return self._active[key]
-        except KeyError:
-            self._memory.move_to_end(key)
-            return self._memory[key]
-
-    def __setitem__(self, key, value):
-        self._memory.pop(key, None)
-        self._active[key] = value
-
-    def __delitem__(self, key):
-        try:
-            value = self._active.pop(key)
-        except KeyError:
-            pass
-        else:
-            while len(self._memory) >= self._capacity:
-                self._memory.popitem(last=False)
-            self._memory[key] = value
-
-    def clear(self):
-        self._active.clear()
-        self._memory.clear()
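
The subtlety in LRUMemo is that deletion parks an item in the bounded
_memory mapping instead of discarding it; a sketch::

    memo = LRUMemo(capacity=2)
    memo["x"] = 1
    del memo["x"]           # retained in _memory, not dropped
    assert memo["x"] == 1   # still readable after deletion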
-
-
-class UnboundedMemo(dict):
-    """
-    A memoizing mapping that retains all deleted items
-    """
-
-    def __delitem__(self, key):
-        pass
-
-
-def _escape_regex_range_chars(s: str) -> str:
-    # escape these chars: ^-[]
-    for c in r"\^-[]":
-        s = s.replace(c, _bslash + c)
-    s = s.replace("\n", r"\n")
-    s = s.replace("\t", r"\t")
-    return str(s)
-
-
-def _collapse_string_to_ranges(
-    s: Union[str, Iterable[str]], re_escape: bool = True
-) -> str:
-    def is_consecutive(c):
-        c_int = ord(c)
-        is_consecutive.prev, prev = c_int, is_consecutive.prev
-        if c_int - prev > 1:
-            is_consecutive.value = next(is_consecutive.counter)
-        return is_consecutive.value
-
-    is_consecutive.prev = 0
-    is_consecutive.counter = itertools.count()
-    is_consecutive.value = -1
-
-    def escape_re_range_char(c):
-        return "\\" + c if c in r"\^-][" else c
-
-    def no_escape_re_range_char(c):
-        return c
-
-    if not re_escape:
-        escape_re_range_char = no_escape_re_range_char
-
-    ret = []
-    s = "".join(sorted(set(s)))
-    if len(s) > 3:
-        for _, chars in itertools.groupby(s, key=is_consecutive):
-            first = last = next(chars)
-            last = collections.deque(
-                itertools.chain(iter([last]), chars), maxlen=1
-            ).pop()
-            if first == last:
-                ret.append(escape_re_range_char(first))
-            else:
-                sep = "" if ord(last) == ord(first) + 1 else "-"
-                ret.append(
-                    "{}{}{}".format(
-                        escape_re_range_char(first), sep, escape_re_range_char(last)
-                    )
-                )
-    else:
-        ret = [escape_re_range_char(c) for c in s]
-
-    return "".join(ret)
-
-
-def _flatten(ll: list) -> list:
-    ret = []
-    for i in ll:
-        if isinstance(i, list):
-            ret.extend(_flatten(i))
-        else:
-            ret.append(i)
-    return ret
diff --git a/env/lib/python3.10/site-packages/pkg_resources/_vendor/zipp.py b/env/lib/python3.10/site-packages/pkg_resources/_vendor/zipp.py
deleted file mode 100644
index 26b723c..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/_vendor/zipp.py
+++ /dev/null
@@ -1,329 +0,0 @@
-import io
-import posixpath
-import zipfile
-import itertools
-import contextlib
-import sys
-import pathlib
-
-if sys.version_info < (3, 7):
-    from collections import OrderedDict
-else:
-    OrderedDict = dict
-
-
-__all__ = ['Path']
-
-
-def _parents(path):
-    """
-    Given a path with elements separated by
-    posixpath.sep, generate all parents of that path.
-
-    >>> list(_parents('b/d'))
-    ['b']
-    >>> list(_parents('/b/d/'))
-    ['/b']
-    >>> list(_parents('b/d/f/'))
-    ['b/d', 'b']
-    >>> list(_parents('b'))
-    []
-    >>> list(_parents(''))
-    []
-    """
-    return itertools.islice(_ancestry(path), 1, None)
-
-
-def _ancestry(path):
-    """
-    Given a path with elements separated by
-    posixpath.sep, generate all elements of that path
-
-    >>> list(_ancestry('b/d'))
-    ['b/d', 'b']
-    >>> list(_ancestry('/b/d/'))
-    ['/b/d', '/b']
-    >>> list(_ancestry('b/d/f/'))
-    ['b/d/f', 'b/d', 'b']
-    >>> list(_ancestry('b'))
-    ['b']
-    >>> list(_ancestry(''))
-    []
-    """
-    path = path.rstrip(posixpath.sep)
-    while path and path != posixpath.sep:
-        yield path
-        path, tail = posixpath.split(path)
-
-
-_dedupe = OrderedDict.fromkeys
-"""Deduplicate an iterable in original order"""
-
-
-def _difference(minuend, subtrahend):
-    """
-    Return items in minuend not in subtrahend, retaining order
-    with O(1) lookup.
-    """
-    return itertools.filterfalse(set(subtrahend).__contains__, minuend)
-
-
-class CompleteDirs(zipfile.ZipFile):
-    """
-    A ZipFile subclass that ensures that implied directories
-    are always included in the namelist.
-    """
-
-    @staticmethod
-    def _implied_dirs(names):
-        parents = itertools.chain.from_iterable(map(_parents, names))
-        as_dirs = (p + posixpath.sep for p in parents)
-        return _dedupe(_difference(as_dirs, names))
-
-    def namelist(self):
-        names = super(CompleteDirs, self).namelist()
-        return names + list(self._implied_dirs(names))
-
-    def _name_set(self):
-        return set(self.namelist())
-
-    def resolve_dir(self, name):
-        """
-        If the name represents a directory, return that name
-        as a directory (with the trailing slash).
-        """
-        names = self._name_set()
-        dirname = name + '/'
-        dir_match = name not in names and dirname in names
-        return dirname if dir_match else name
-
-    @classmethod
-    def make(cls, source):
-        """
-        Given a source (filename or zipfile), return an
-        appropriate CompleteDirs subclass.
-        """
-        if isinstance(source, CompleteDirs):
-            return source
-
-        if not isinstance(source, zipfile.ZipFile):
-            return cls(_pathlib_compat(source))
-
-        # Only allow for FastLookup when supplied zipfile is read-only
-        if 'r' not in source.mode:
-            cls = CompleteDirs
-
-        source.__class__ = cls
-        return source
-
-
-class FastLookup(CompleteDirs):
-    """
-    ZipFile subclass to ensure implicit
-    dirs exist and are resolved rapidly.
-    """
-
-    def namelist(self):
-        with contextlib.suppress(AttributeError):
-            return self.__names
-        self.__names = super(FastLookup, self).namelist()
-        return self.__names
-
-    def _name_set(self):
-        with contextlib.suppress(AttributeError):
-            return self.__lookup
-        self.__lookup = super(FastLookup, self)._name_set()
-        return self.__lookup
-
-
-def _pathlib_compat(path):
-    """
-    For path-like objects, convert to a filename for compatibility
-    on Python 3.6.1 and earlier.
-    """
-    try:
-        return path.__fspath__()
-    except AttributeError:
-        return str(path)
-
-
-class Path:
-    """
-    A pathlib-compatible interface for zip files.
-
-    Consider a zip file with this structure::
-
-        .
-        ├── a.txt
-        └── b
-            ├── c.txt
-            └── d
-                └── e.txt
-
-    >>> data = io.BytesIO()
-    >>> zf = zipfile.ZipFile(data, 'w')
-    >>> zf.writestr('a.txt', 'content of a')
-    >>> zf.writestr('b/c.txt', 'content of c')
-    >>> zf.writestr('b/d/e.txt', 'content of e')
-    >>> zf.filename = 'mem/abcde.zip'
-
-    Path accepts the zipfile object itself or a filename
-
-    >>> root = Path(zf)
-
-    From there, several path operations are available.
-
-    Directory iteration (including the zip file itself):
-
-    >>> a, b = root.iterdir()
-    >>> a
-    Path('mem/abcde.zip', 'a.txt')
-    >>> b
-    Path('mem/abcde.zip', 'b/')
-
-    name property:
-
-    >>> b.name
-    'b'
-
-    join with divide operator:
-
-    >>> c = b / 'c.txt'
-    >>> c
-    Path('mem/abcde.zip', 'b/c.txt')
-    >>> c.name
-    'c.txt'
-
-    Read text:
-
-    >>> c.read_text()
-    'content of c'
-
-    existence:
-
-    >>> c.exists()
-    True
-    >>> (b / 'missing.txt').exists()
-    False
-
-    Coercion to string:
-
-    >>> import os
-    >>> str(c).replace(os.sep, posixpath.sep)
-    'mem/abcde.zip/b/c.txt'
-
-    At the root, ``name``, ``filename``, and ``parent``
-    resolve to the zipfile. Note these attributes are not
-    valid and will raise a ``ValueError`` if the zipfile
-    has no filename.
-
-    >>> root.name
-    'abcde.zip'
-    >>> str(root.filename).replace(os.sep, posixpath.sep)
-    'mem/abcde.zip'
-    >>> str(root.parent)
-    'mem'
-    """
-
-    __repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})"
-
-    def __init__(self, root, at=""):
-        """
-        Construct a Path from a ZipFile or filename.
-
-        Note: When the source is an existing ZipFile object,
-        its type (__class__) will be mutated to a
-        specialized type. If the caller wishes to retain the
-        original type, the caller should either create a
-        separate ZipFile object or pass a filename.
-        """
-        self.root = FastLookup.make(root)
-        self.at = at
-
-    def open(self, mode='r', *args, pwd=None, **kwargs):
-        """
-        Open this entry as text or binary following the semantics
-        of ``pathlib.Path.open()`` by passing arguments through
-        to io.TextIOWrapper().
-        """
-        if self.is_dir():
-            raise IsADirectoryError(self)
-        zip_mode = mode[0]
-        if not self.exists() and zip_mode == 'r':
-            raise FileNotFoundError(self)
-        stream = self.root.open(self.at, zip_mode, pwd=pwd)
-        if 'b' in mode:
-            if args or kwargs:
-                raise ValueError("encoding args invalid for binary operation")
-            return stream
-        return io.TextIOWrapper(stream, *args, **kwargs)
-
-    @property
-    def name(self):
-        return pathlib.Path(self.at).name or self.filename.name
-
-    @property
-    def suffix(self):
-        return pathlib.Path(self.at).suffix or self.filename.suffix
-
-    @property
-    def suffixes(self):
-        return pathlib.Path(self.at).suffixes or self.filename.suffixes
-
-    @property
-    def stem(self):
-        return pathlib.Path(self.at).stem or self.filename.stem
-
-    @property
-    def filename(self):
-        return pathlib.Path(self.root.filename).joinpath(self.at)
-
-    def read_text(self, *args, **kwargs):
-        with self.open('r', *args, **kwargs) as strm:
-            return strm.read()
-
-    def read_bytes(self):
-        with self.open('rb') as strm:
-            return strm.read()
-
-    def _is_child(self, path):
-        return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/")
-
-    def _next(self, at):
-        return self.__class__(self.root, at)
-
-    def is_dir(self):
-        return not self.at or self.at.endswith("/")
-
-    def is_file(self):
-        return self.exists() and not self.is_dir()
-
-    def exists(self):
-        return self.at in self.root._name_set()
-
-    def iterdir(self):
-        if not self.is_dir():
-            raise ValueError("Can't listdir a file")
-        subs = map(self._next, self.root.namelist())
-        return filter(self._is_child, subs)
-
-    def __str__(self):
-        return posixpath.join(self.root.filename, self.at)
-
-    def __repr__(self):
-        return self.__repr.format(self=self)
-
-    def joinpath(self, *other):
-        next = posixpath.join(self.at, *map(_pathlib_compat, other))
-        return self._next(self.root.resolve_dir(next))
-
-    __truediv__ = joinpath
-
-    @property
-    def parent(self):
-        if not self.at:
-            return self.filename.parent
-        parent_at = posixpath.dirname(self.at.rstrip('/'))
-        if parent_at:
-            parent_at += '/'
-        return self._next(parent_at)
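
A runnable sketch of the Path class above on an in-memory archive::

    import io, zipfile

    buf = io.BytesIO()
    with zipfile.ZipFile(buf, "w") as zf:
        zf.writestr("pkg/data.txt", "hello")

    root = Path(zipfile.ZipFile(buf))
    (entry,) = (root / "pkg").iterdir()   # "pkg/" is an implied dir
    assert entry.name == "data.txt"
    assert entry.read_text() == "hello"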
diff --git a/env/lib/python3.10/site-packages/pkg_resources/extern/__init__.py b/env/lib/python3.10/site-packages/pkg_resources/extern/__init__.py
deleted file mode 100644
index 70897ee..0000000
--- a/env/lib/python3.10/site-packages/pkg_resources/extern/__init__.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import importlib.util
-import sys
-
-
-class VendorImporter:
-    """
-    A PEP 302 meta path importer for finding optionally-vendored
-    or otherwise naturally-installed packages from root_name.
-    """
-
-    def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
-        self.root_name = root_name
-        self.vendored_names = set(vendored_names)
-        self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')
-
-    @property
-    def search_path(self):
-        """
-        Search first the vendor package, then fall back to the naturally-installed package.
-        """
-        yield self.vendor_pkg + '.'
-        yield ''
-
-    def _module_matches_namespace(self, fullname):
-        """Figure out if the target module is vendored."""
-        root, base, target = fullname.partition(self.root_name + '.')
-        return not root and any(map(target.startswith, self.vendored_names))
-
-    def load_module(self, fullname):
-        """
-        Iterate over the search path to locate and load fullname.
-        """
-        root, base, target = fullname.partition(self.root_name + '.')
-        for prefix in self.search_path:
-            try:
-                extant = prefix + target
-                __import__(extant)
-                mod = sys.modules[extant]
-                sys.modules[fullname] = mod
-                return mod
-            except ImportError:
-                pass
-        else:
-            raise ImportError(
-                "The '{target}' package is required; "
-                "normally this is bundled with this package so if you get "
-                "this warning, consult the packager of your "
-                "distribution.".format(**locals())
-            )
-
-    def create_module(self, spec):
-        return self.load_module(spec.name)
-
-    def exec_module(self, module):
-        pass
-
-    def find_spec(self, fullname, path=None, target=None):
-        """Return a module spec for vendored names."""
-        return (
-            importlib.util.spec_from_loader(fullname, self)
-            if self._module_matches_namespace(fullname) else None
-        )
-
-    def install(self):
-        """
-        Install this importer into sys.meta_path if not already present.
-        """
-        if self not in sys.meta_path:
-            sys.meta_path.append(self)
-
-
-names = (
-    'packaging', 'pyparsing', 'appdirs', 'jaraco', 'importlib_resources',
-    'more_itertools',
-)
-VendorImporter(__name__, names).install()
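
The net effect of the installed importer, sketched::

    # with VendorImporter on sys.meta_path, this import:
    from pkg_resources.extern import packaging
    # first tries "pkg_resources._vendor.packaging" (the vendored copy),
    # then falls back to a top-level "packaging" package; whichever is
    # found is aliased in sys.modules under the extern name.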
diff --git a/env/lib/python3.10/site-packages/pkg_resources/extern/__pycache__/__init__.cpython-310.pyc b/env/lib/python3.10/site-packages/pkg_resources/extern/__pycache__/__init__.cpython-310.pyc
deleted file mode 100644
index 059738b..0000000
Binary files a/env/lib/python3.10/site-packages/pkg_resources/extern/__pycache__/__init__.cpython-310.pyc and /dev/null differ
-- 
cgit v1.2.3-59-g8ed1b